diff --git a/test/Integration/mlir-gen.mlir b/test/Integration/mlir-gen.mlir
index 1f2e4b879..9bf89af54 100644
--- a/test/Integration/mlir-gen.mlir
+++ b/test/Integration/mlir-gen.mlir
@@ -1,5 +1,8 @@
 // MLP with Softmax version
 // RUN: mlir-gen --kernel=const --bias --relu --seed=123 --batch=10 --layers=10,10,10 --softmax | tpp-run -e entry -entry-point-result=void
+// RUN: not --crash mlir-gen --output=named --kernel=const --bias --relu --seed=123 --batch=10 --layers=10,10,10 --softmax 2>&1 | FileCheck %s --check-prefix=SOFTMAX-TODO
+// SOFTMAX-TODO: Linalg named ops for softmax not implemented yet
+// SOFTMAX-TODO: UNREACHABLE executed
 
 // MLP without softmax
 // RUN: mlir-gen --kernel=const --bias --relu --seed=123 --batch=10 --layers=10,10,10 | tpp-run -e entry -entry-point-result=void
diff --git a/test/Passes/tile-and-fuse_named-op.mlir b/test/Passes/tile-and-fuse-named-op.mlir
similarity index 99%
rename from test/Passes/tile-and-fuse_named-op.mlir
rename to test/Passes/tile-and-fuse-named-op.mlir
index 26450ed78..281022de7 100644
--- a/test/Passes/tile-and-fuse_named-op.mlir
+++ b/test/Passes/tile-and-fuse-named-op.mlir
@@ -128,4 +128,4 @@ func.func @matmul_sequence_fusion_with_relu(%arg0: tensor<32x64xf32>, %arg1: ten
 // CHECK: scf.yield %{{.+}} : tensor<32x32xf32>
 // CHECK-NEXT: }
 
-// -----
\ No newline at end of file
+// -----
diff --git a/tools/mlir-gen/MLIRGen.cpp b/tools/mlir-gen/MLIRGen.cpp
index 0b2cb3c17..8e78603f9 100644
--- a/tools/mlir-gen/MLIRGen.cpp
+++ b/tools/mlir-gen/MLIRGen.cpp
@@ -517,7 +517,8 @@ Value MLIRGenerator::lowerNamedSoftmax(Value input, Value output) {
     return input;
 
   // TODO: Add lowering of softmax to sequence of named Ops
-
+  llvm_unreachable("Linalg named ops for softmax not implemented yet");
+
   auto outTy = cast<ShapedType>(input.getType());
   // Softmax flops = 4 * M * N = 4 * prod(outputDims)
   int64_t softmaxFlops = 1;