Skip to content

Commit

Permalink
Use common function to compute dynamic dimension values in MatMul and Relu.
Browse files · Browse the repository at this point in the history
  • Loading branch information
slyalin committed Jul 25, 2024
1 parent 13b7952 commit e7555c8
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 26 deletions.
18 changes: 4 additions & 14 deletions src/common/transformations/src/transformations/mlir/op/matmul.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ struct ConvertMatMul {
void operator()(ConversionContext& context, NodePtr node) {
auto matmul_node = std::dynamic_pointer_cast<ov::op::v0::MatMul>(node);
assert(matmul_node);

// FIXME: current code limitation
assert(!matmul_node->get_transpose_a() && matmul_node->get_transpose_b());

Expand All @@ -29,20 +30,9 @@ struct ConvertMatMul {
const auto inputs = context.getInputs(node);
const auto ov_output_element_type = node->get_output_element_type(0);
const auto ov_output_shape = node->get_output_partial_shape(0);
auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type); // Instead of this (WRONG): cast<mlir::ShapedType>(inputs[0].getType());

llvm::SmallVector<Value> dynamicSizes;
for (auto [idx, dim] : llvm::enumerate(outType.getShape())) {
if (!mlir::ShapedType::isDynamic(dim))
continue;
// FIXME: correct in case if (!transpose_a && transpose_b)
auto dimSize =
builder.create<tensor::DimOp>(loc,
idx == 0 ? inputs[0] : inputs[1],
0); // TODO: Use symbols instead of taking dims directly from inputs
dynamicSizes.push_back(dimSize);
}
auto empty = builder.create<tensor::EmptyOp>(loc, outType, dynamicSizes);
auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type);
auto dynamic_dimensions = context.get_dynamic_dimension_values(ov_output_shape);
auto empty = builder.create<tensor::EmptyOp>(loc, outType, dynamic_dimensions);
auto zero = getConstant(builder, ov_output_element_type, 0);
auto fill = builder.create<linalg::FillOp>(loc, mlir::ValueRange{zero}, mlir::ValueRange{empty});
// TODO: Add other variants of transpose_a/transpose_b
Expand Down
14 changes: 2 additions & 12 deletions src/common/transformations/src/transformations/mlir/op/relu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,22 +19,12 @@ struct ConvertRelu {
void operator()(ConversionContext& context, NodePtr node) {
auto loc = createLocation(context.context, node);
auto& builder = context.builder();
// TODO: Support broadcasts
const auto input = context.getInputs(node)[0];
const auto ov_output_element_type = node->get_output_element_type(0);
const auto ov_output_shape = node->get_output_partial_shape(0);
auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type);
// Named unary ops directly overwrite data in `outs` buffer so, there is no need to provide non-empty
// destination at the tensor-level.
// Use `tensor.empty` to avoid temporary buffer allocation and memcpy after bufferization.
llvm::SmallVector<Value> dynamicSizes;
for (auto [idx, dim] : llvm::enumerate(outType.getShape())) {
if (!mlir::ShapedType::isDynamic(dim))
continue;
auto dimSize = builder.create<tensor::DimOp>(loc, input, idx);
dynamicSizes.push_back(dimSize);
}
auto empty = builder.create<tensor::EmptyOp>(loc, outType, dynamicSizes);
auto dynamic_dimensions = context.get_dynamic_dimension_values(ov_output_shape);
auto empty = builder.create<tensor::EmptyOp>(loc, outType, dynamic_dimensions);
auto zero = getConstant(builder, ov_output_element_type, 0);
auto fill = builder.create<linalg::FillOp>(loc, mlir::ValueRange{zero}, mlir::ValueRange{empty});
auto relu =
Expand Down

0 comments on commit e7555c8

Please sign in to comment.