Skip to content

Commit

Permalink
Merge pull request #87 from Xilinx/sayeddla.new_tosa_verifiers
Browse files Browse the repository at this point in the history
new tosa verifiers
  • Loading branch information
sayeddla authored Jan 18, 2024
2 parents 20684a4 + 6cd0a91 commit 3be558d
Show file tree
Hide file tree
Showing 4 changed files with 202 additions and 10 deletions.
13 changes: 10 additions & 3 deletions mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -1429,15 +1429,14 @@ def Tosa_ConcatOp : Tosa_Op<"concat", [
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
}];
let hasFolder = 1;
let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// Operator: pad
//===----------------------------------------------------------------------===//
def Tosa_PadOp : Tosa_Op<"pad", [
DeclareOpInterfaceMethods<InferShapedTypeOpInterface,
["inferReturnTypeComponents"]>,
Pure]> {
InferTensorType, Pure]> {
let summary = "Pads a tensor with value specified.";

let description = [{
Expand Down Expand Up @@ -1476,6 +1475,14 @@ def Tosa_PadOp : Tosa_Op<"pad", [

let hasCanonicalizer = 1;
let hasFolder = 1;
let hasVerifier = 1;

let extraClassDeclaration = [{
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
}];

}

//===----------------------------------------------------------------------===//
Expand Down
77 changes: 74 additions & 3 deletions mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -503,6 +503,41 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
return success();
}

/// Verifier for tosa.concat.
///
/// Checks that (a) the concatenation axis lies inside the rank of the
/// (first) ranked input and (b) every ranked input agrees on that rank.
/// Unranked inputs are exempt; if no input is ranked, nothing is checked.
LogicalResult ConcatOp::verify() {
  OperandRange inputs = getInput1();

  // Locate the rank of the first ranked input, if any.
  auto inputRank = ShapedType::kDynamic;
  bool hasRankedInputs = false;
  for (auto input : inputs) {
    auto inputType = llvm::cast<ShapedType>(input.getType());
    if (inputType.hasRank()) {
      hasRankedInputs = true;
      inputRank = inputType.getRank();
      break;
    }
  }

  if (hasRankedInputs) {
    int64_t axis = getAxis();
    // Clamp to 1 so that a rank-0 input still admits axis == 0.
    // (std::max<int64_t> replaces the previous C-style `(int64_t)1` cast.)
    if (axis < 0 || axis >= std::max<int64_t>(1, inputRank)) {
      return emitOpError() << "axis must be in range 0 to " << inputRank - 1;
    }

    // All ranked inputs must share the rank found above.
    for (auto input : inputs) {
      auto inputType = llvm::cast<ShapedType>(input.getType());
      if (!inputType.hasRank()) {
        continue;
      }
      if (inputRank != inputType.getRank()) {
        return emitOpError()
               << "rank of input " << inputType
               << " does not match other input rank(s) (" << inputRank << ")";
      }
    }
  }
  return success();
}

LogicalResult tosa::EqualOp::inferReturnTypeComponents(
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes,
Expand Down Expand Up @@ -590,6 +625,7 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
ValueShapeRange operands, DictionaryAttr attributes,
OpaqueProperties properties, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
Type inputType = getElementTypeOrSelf(operands[0]);
ShapeAdaptor inputShape = operands.getShape(0);
ShapeAdaptor paddingShape = operands.getShape(1);
SmallVector<int64_t> outputShape;
Expand All @@ -610,15 +646,17 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
}

outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
inferredReturnShapes.push_back(
ShapedTypeComponents(outputShape, inputType));
return success();
}

DenseIntElementsAttr paddings;
// If the paddings value is not a constant, all dimensions must be dynamic.
if (!matchPattern(operands[1], m_Constant(&paddings))) {
outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
inferredReturnShapes.push_back(
ShapedTypeComponents(outputShape, inputType));
return success();
}

Expand All @@ -638,7 +676,39 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
paddingValues[i * 2 + 1]);
}

inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape, inputType));
return success();
}

/// Verifier for tosa.pad.
///
/// Checks, where shapes are static enough to decide:
///   - the input tensor is not rank 0;
///   - the paddings operand is a rank-2 tensor of shape [rank(input), 2];
///   - constant padding amounts are not negative (zero is accepted, since
///     the check below rejects only values < 0).
LogicalResult PadOp::verify() {
ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
// Padding a rank-0 tensor has no meaningful axes to pad along.
if (inputType.hasRank() && inputType.getRank() == 0) {
return emitOpError() << "input tensor rank must not be 0";
}

ShapedType paddingType = llvm::cast<ShapedType>(getPadding().getType());
if (paddingType.hasRank()) {
if (paddingType.getRank() != 2) {
return emitOpError() << "paddings must be a tensor of rank 2";
}
// Dim 0 of the paddings tensor must equal the input rank when both are
// statically known.
if (inputType.hasRank() && !paddingType.isDynamicDim(0) &&
inputType.getRank() != paddingType.getDimSize(0)) {
return emitOpError() << "paddings must be a tensor of shape ["
<< inputType.getRank() << ", 2]";
}
// Dim 1 must be exactly 2 (a [low, high] pair per input dimension).
// NOTE(review): this branch prints inputType.getRank() without checking
// hasRank(); presumably the ODS operand constraint guarantees a ranked
// input here — confirm, since getRank() asserts on unranked types.
if (!paddingType.isDynamicDim(1) && paddingType.getDimSize(1) != 2) {
return emitOpError() << "paddings must be a tensor of shape ["
<< inputType.getRank() << ", 2]";
}

// When the paddings are a compile-time constant, reject any negative
// padding amount.
DenseIntElementsAttr paddings;
if (matchPattern(getPadding(), m_Constant(&paddings))) {
if (llvm::any_of(paddings,
[](auto val) { return val.getSExtValue() < 0; })) {
return emitOpError() << "number of pad elements must be positive";
}
}
}
return success();
}

Expand Down Expand Up @@ -1069,6 +1139,7 @@ REDUCE_SHAPE_INFER(tosa::ReduceProdOp)
REDUCE_SHAPE_INFER(tosa::ReduceSumOp)
#undef REDUCE_SHAPE_INFER
COMPATIBLE_RETURN_TYPES(tosa::ConcatOp)
COMPATIBLE_RETURN_TYPES(tosa::PadOp)
#undef COMPATIBLE_RETURN_TYPES

static LogicalResult NAryInferReturnTypes(
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
}

func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
%0 = arith.constant dense<[[-1, 2], [3, 4]]> : tensor<2x2xi32>
%0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
// TODO: Output contains multiple "arith.constant 1 : index".
// CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
// CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
Expand Down
120 changes: 117 additions & 3 deletions mlir/test/Dialect/Tosa/invalid.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,48 @@ func.func @test_concat_element_type_mismatch(%arg0 : tensor<1x2xf32>, %arg1 : te

// -----

// Concat shape inference: declared result shape disagrees with the shape
// inferred along the concat axis (1 + 2 = 3, not 2).
func.func @test_concat_output_shape_mismatch(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>) -> tensor<2x2xf32> {
// expected-error@+2 {{failed to infer returned types}}
// expected-error@+1 {{inferred type(s) 'tensor<2x3xf32>' are incompatible with return type(s) of operation 'tensor<2x2xf32>}}
%0 = "tosa.concat"(%arg0, %arg1) {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
return %0 : tensor<2x2xf32>
}

// -----

// Concat shape inference: declared result rank (3) disagrees with the
// inferred rank (2) of the concatenated inputs.
func.func @test_concat_output_rank_mismatch(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>) -> tensor<?x?x?xf32> {
// expected-error@+2 {{failed to infer returned types}}
// expected-error@+1 {{inferred type(s) 'tensor<2x3xf32>' are incompatible with return type(s) of operation 'tensor<?x?x?xf32>}}
%0 = "tosa.concat"(%arg0, %arg1) {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<?x?x?xf32>
return %0 : tensor<?x?x?xf32>
}

// -----

// Concat verifier: inputs of differing rank (2 vs 3) must be rejected.
func.func @test_concat_input_rank_mismatch(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2x2xf32>) -> tensor<?x?xf32> {
// expected-error@+1 {{'tosa.concat' op rank of input 'tensor<2x2x2xf32>' does not match other input rank(s) (2)}}
%0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2x2xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}

// -----

// Concat verifier: a negative axis is out of range for rank-2 inputs.
func.func @test_concat_axis_out_of_range(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xf32>) -> tensor<?x?xf32> {
// expected-error@+1 {{'tosa.concat' op axis must be in range 0 to 1}}
%0 = "tosa.concat"(%arg0, %arg1) {axis = -1 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}

// -----

// Concat verifier: axis == rank (3) exceeds the valid range [0, 2] for
// rank-3 inputs.
func.func @test_concat_axis_out_of_range(%arg0 : tensor<10x11x12xf32>, %arg1 : tensor<10x11x21xf32>) -> tensor<?x?x?xf32> {
// expected-error@+1 {{'tosa.concat' op axis must be in range 0 to 2}}
%0 = "tosa.concat"(%arg0, %arg1) {axis = 3 : i64} : (tensor<10x11x12xf32>, tensor<10x11x21xf32>) -> tensor<?x?x?xf32>
return %0 : tensor<?x?x?xf32>
}

// -----

func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
// expected-error@+1 {{'tosa.pad' op padding of pad is not constant}}
%0 = "tosa.pad"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
Expand All @@ -64,11 +106,83 @@ func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>

// -----

func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<i8>) -> tensor<13x21x3xi8> {
func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<i8>) -> tensor<?x?x?xi8> {
%0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
// expected-error@+1 {{'tosa.pad' op pad_const of pad is not constant}}
%1 = "tosa.pad"(%arg0, %0, %arg1) : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor<i8>) -> tensor<13x21x3xi8>
return %1 : tensor<13x21x3xi8>
%1 = "tosa.pad"(%arg0, %0, %arg1) : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor<i8>) -> tensor<?x?x?xi8>
return %1 : tensor<?x?x?xi8>
}

// -----

// Pad shape inference: padding [1,1] on every dim of 13x21x3 infers
// 15x23x5, so the declared 13x21x3 result must be rejected.
func.func @test_pad_output_shape_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = "tosa.const"() {value = dense<[[1, 1], [1, 1], [1, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
// expected-error@+2 {{'tosa.pad' op failed to infer returned types}}
// expected-error@+1 {{'tosa.pad' op inferred type(s) 'tensor<15x23x5xf32>' are incompatible with return type(s) of operation 'tensor<13x21x3xf32>}}
%1 = "tosa.pad"(%arg0, %0) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
return %1 : tensor<13x21x3xf32>
}

// -----

// Pad shape inference: the result element type must match the input's
// (f32 in, i32 declared out).
func.func @test_pad_type_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<15x23x5xi32> {
%0 = "tosa.const"() {value = dense<[[1, 1], [1, 1], [1, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
// expected-error@+2 {{'tosa.pad' op failed to infer returned types}}
// expected-error@+1 {{'tosa.pad' op inferred type(s) 'tensor<15x23x5xf32>' are incompatible with return type(s) of operation 'tensor<15x23x5xi32>}}
%1 = "tosa.pad"(%arg0, %0) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<15x23x5xi32>
return %1 : tensor<15x23x5xi32>
}

// -----

// Pad verifier: the paddings operand must have rank 2, not rank 1.
// (Fixed test-name typo: "incorret" -> "incorrect".)
func.func @test_pad_incorrect_padding_rank(%arg0: tensor<13x21xf32>) -> tensor<13x21xf32> {
%0 = "tosa.const"() {value = dense<[0, 1]> : tensor<2xi32>} : () -> tensor<2xi32>
// expected-error@+1 {{'tosa.pad' op paddings must be a tensor of rank 2}}
%1 = "tosa.pad"(%arg0, %0) : (tensor<13x21xf32>, tensor<2xi32>) -> tensor<13x21xf32>
return %1 : tensor<13x21xf32>
}

// -----

// Pad verifier: paddings dim 0 (4) must equal the input rank (2).
// (Fixed test-name typo: "incorret" -> "incorrect".)
func.func @test_pad_incorrect_padding_shape(%arg0: tensor<13x21xf32>) -> tensor<13x21xf32> {
%0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1], [1, 1]]> : tensor<4x2xi32>} : () -> tensor<4x2xi32>
// expected-error@+1 {{'tosa.pad' op paddings must be a tensor of shape [2, 2]}}
%1 = "tosa.pad"(%arg0, %0) : (tensor<13x21xf32>, tensor<4x2xi32>) -> tensor<13x21xf32>
return %1 : tensor<13x21xf32>
}

// -----

// Pad verifier: paddings dim 1 (4) must be exactly 2 ([low, high] pairs).
// (Fixed test-name typo: "incorret" -> "incorrect".)
func.func @test_pad_incorrect_padding_shape(%arg0: tensor<13x21xf32>) -> tensor<13x21xf32> {
%0 = "tosa.const"() {value = dense<[[0, 0, 0, 1], [0, 1, 1, 1]]> : tensor<2x4xi32>} : () -> tensor<2x4xi32>
// expected-error@+1 {{'tosa.pad' op paddings must be a tensor of shape [2, 2]}}
%1 = "tosa.pad"(%arg0, %0) : (tensor<13x21xf32>, tensor<2x4xi32>) -> tensor<13x21xf32>
return %1 : tensor<13x21xf32>
}

// -----

// Pad verifier: constant paddings containing a negative entry (-1) must
// be rejected.
func.func @test_pad_negative_padding(%arg0: tensor<13x21xf32>) -> tensor<?x?xf32> {
%0 = "tosa.const"() {value = dense<[[0, 0], [0, -1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
// expected-error@+1 {{'tosa.pad' op number of pad elements must be positive}}
%1 = "tosa.pad"(%arg0, %0) : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}

// -----

// Pad ODS constraint: scalar (non-tensor) operands are rejected before
// the verifier runs.
func.func @test_pad_incorrect_input(%arg0: f32, %arg1: i32) -> f32 {
// expected-error@+1 {{'tosa.pad' op operand #0 must be ranked tensor of number values, but got 'f32'}}
%1 = "tosa.pad"(%arg0, %arg1) : (f32, i32) -> f32
return %1 : f32
}

// -----

// Pad verifier: a rank-0 input tensor must be rejected.
func.func @test_pad_zero_rank_input(%arg0: tensor<f32>, %arg1: tensor<i32>) -> tensor<f32> {
// expected-error@+1 {{'tosa.pad' op input tensor rank must not be 0}}
%1 = "tosa.pad"(%arg0, %arg1) : (tensor<f32>, tensor<i32>) -> tensor<f32>
return %1 : tensor<f32>
}

// -----
Expand Down

0 comments on commit 3be558d

Please sign in to comment.