From e7555c8d7519c89b96a0fed42cc342a6350c2a2f Mon Sep 17 00:00:00 2001
From: Sergey Lyalin
Date: Thu, 25 Jul 2024 13:32:41 +0000
Subject: [PATCH] Use common function to compute dynamic dimension values in
 MatMul and Relu.

---
 .../src/transformations/mlir/op/matmul.cpp | 18 ++++--------------
 .../src/transformations/mlir/op/relu.cpp   | 14 ++------------
 2 files changed, 6 insertions(+), 26 deletions(-)

diff --git a/src/common/transformations/src/transformations/mlir/op/matmul.cpp b/src/common/transformations/src/transformations/mlir/op/matmul.cpp
index f125e40ed409b6..229b4734358cfd 100644
--- a/src/common/transformations/src/transformations/mlir/op/matmul.cpp
+++ b/src/common/transformations/src/transformations/mlir/op/matmul.cpp
@@ -20,6 +20,7 @@ struct ConvertMatMul {
     void operator()(ConversionContext& context, NodePtr node) {
         auto matmul_node = std::dynamic_pointer_cast<ov::op::v0::MatMul>(node);
         assert(matmul_node);
+        // FIXME: current code limitation
         assert(!matmul_node->get_transpose_a() && matmul_node->get_transpose_b());
@@ -29,20 +30,9 @@ struct ConvertMatMul {
         const auto inputs = context.getInputs(node);
         const auto ov_output_element_type = node->get_output_element_type(0);
         const auto ov_output_shape = node->get_output_partial_shape(0);
-        auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type);  // Instead of this (WRONG): cast<mlir::ShapedType>(inputs[0].getType());
-
-        llvm::SmallVector<mlir::Value> dynamicSizes;
-        for (auto [idx, dim] : llvm::enumerate(outType.getShape())) {
-            if (!mlir::ShapedType::isDynamic(dim))
-                continue;
-            // FIXME: correct in case if (!transpose_a && transpose_b)
-            auto dimSize =
-                builder.create<mlir::tensor::DimOp>(loc,
-                                                    idx == 0 ? inputs[0] : inputs[1],
-                                                    0);  // TODO: Use symbols instead of taking dims directly from inputs
-            dynamicSizes.push_back(dimSize);
-        }
-        auto empty = builder.create<mlir::tensor::EmptyOp>(loc, outType, dynamicSizes);
+        auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type);
+        auto dynamic_dimensions = context.get_dynamic_dimension_values(ov_output_shape);
+        auto empty = builder.create<mlir::tensor::EmptyOp>(loc, outType, dynamic_dimensions);
         auto zero = getConstant(builder, ov_output_element_type, 0);
         auto fill = builder.create<mlir::linalg::FillOp>(loc, mlir::ValueRange{zero}, mlir::ValueRange{empty});
         // TODO: Add other variants of transpose_a/transpose_b
diff --git a/src/common/transformations/src/transformations/mlir/op/relu.cpp b/src/common/transformations/src/transformations/mlir/op/relu.cpp
index a25f571f61cddf..116fd24de7229a 100644
--- a/src/common/transformations/src/transformations/mlir/op/relu.cpp
+++ b/src/common/transformations/src/transformations/mlir/op/relu.cpp
@@ -19,22 +19,12 @@ struct ConvertRelu {
     void operator()(ConversionContext& context, NodePtr node) {
         auto loc = createLocation(context.context, node);
         auto& builder = context.builder();
-        // TODO: Support broadcasts
         const auto input = context.getInputs(node)[0];
         const auto ov_output_element_type = node->get_output_element_type(0);
         const auto ov_output_shape = node->get_output_partial_shape(0);
         auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type);
-        // Named unary ops directly overwrite data in `outs` buffer so, there is no need to provide non-empty
-        // destination at the tensor-level.
-        // Use `tensor.empty` to avoid temporary buffer allocation and memcpy after bufferization.
-        llvm::SmallVector<mlir::Value> dynamicSizes;
-        for (auto [idx, dim] : llvm::enumerate(outType.getShape())) {
-            if (!mlir::ShapedType::isDynamic(dim))
-                continue;
-            auto dimSize = builder.create<mlir::tensor::DimOp>(loc, input, idx);
-            dynamicSizes.push_back(dimSize);
-        }
-        auto empty = builder.create<mlir::tensor::EmptyOp>(loc, outType, dynamicSizes);
+        auto dynamic_dimensions = context.get_dynamic_dimension_values(ov_output_shape);
+        auto empty = builder.create<mlir::tensor::EmptyOp>(loc, outType, dynamic_dimensions);
         auto zero = getConstant(builder, ov_output_element_type, 0);
         auto fill = builder.create<mlir::linalg::FillOp>(loc, mlir::ValueRange{zero}, mlir::ValueRange{empty});
         auto relu =