diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 4aef409e1f6e63..a6cbc2d9e9f9cf 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -682,6 +682,10 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
 LogicalResult PadOp::verify() {
   ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
+  // Reject rank-0 inputs: tosa.pad requires at least one dimension to pad.
+  if (inputType.hasRank() && inputType.getRank() == 0) {
+    return emitOpError() << "input tensor rank must not be 0";
+  }
+
   ShapedType paddingType = llvm::cast<ShapedType>(getPadding().getType());
   if (paddingType.hasRank()) {
     if (paddingType.getRank() != 2) {
       return emitOpError() << "paddings must be a tensor of rank 2";
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
index fcff330b7107d2..3d6fc238d97649 100644
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -171,6 +171,22 @@ func.func @test_pad_negative_padding(%arg0: tensor<13x21xf32>) -> tensor<13x21xf
 
 // -----
 
+func.func @test_pad_non_tensor_input(%arg0: f32, %arg1: i32) -> f32 {
+  // expected-error@+1 {{'tosa.pad' op operand #0 must be ranked tensor of number values, but got 'f32'}}
+  %1 = "tosa.pad"(%arg0, %arg1) : (f32, i32) -> f32
+  return %1 : f32
+}
+
+// -----
+
+func.func @test_pad_zero_rank_input(%arg0: tensor<f32>, %arg1: tensor<1x2xi32>) -> tensor<f32> {
+  // expected-error@+1 {{'tosa.pad' op input tensor rank must not be 0}}
+  %1 = "tosa.pad"(%arg0, %arg1) : (tensor<f32>, tensor<1x2xi32>) -> tensor<f32>
+  return %1 : tensor<f32>
+}
+
+// -----
+
 func.func @test_transpose_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3xi32>) -> tensor<3x13x21xf32> {
   // expected-error@+1 {{'tosa.transpose' op perms of transpose is not constant}}
   %0 = "tosa.transpose"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3xi32>) -> tensor<3x13x21xf32>