Skip to content

Commit

Permalink
Initialize duplicated return activities in dense backward analysis, more tests
Browse files Browse the repository at this point in the history
  • Loading branch information
pengmai committed Oct 18, 2023
1 parent c0b6c70 commit d470b17
Show file tree
Hide file tree
Showing 4 changed files with 46 additions and 6 deletions.
18 changes: 12 additions & 6 deletions enzyme/Enzyme/MLIR/Analysis/DataFlowActivityAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -291,12 +291,8 @@ class SparseForwardActivityAnalysis
void visitOperation(Operation *op,
ArrayRef<const ForwardValueActivity *> operands,
ArrayRef<ForwardValueActivity *> results) override {
if (op->hasTrait<OpTrait::ConstantLike>()) {
for (auto result : results) {
result->join(ValueActivity::getConstant());
}
if (op->hasTrait<OpTrait::ConstantLike>())
return;
}

// Bail out if this op affects memory.
if (auto memory = dyn_cast<MemoryEffectOpInterface>(op)) {
Expand Down Expand Up @@ -559,6 +555,16 @@ class DenseBackwardActivityAnalysis
propagateIfChanged(before, before->setActiveOut(argAliasClass));
}
}

// Initialize the return activity of the operands
for (Value operand : op->getOperands()) {
if (isa<MemRefType, LLVM::LLVMPointerType>(operand.getType())) {
auto *retAliasClasses =
getOrCreateFor<AliasClassLattice>(op, operand);
for (DistinctAttr retAliasClass : retAliasClasses->aliasClasses)
propagateIfChanged(before, before->setActiveOut(retAliasClass));
}
}
}

ChangeResult result = before->meet(after);
Expand Down Expand Up @@ -763,7 +769,7 @@ void printActivityAnalysisResults(const DataFlowSolver &solver,
for (Operation *returnOp : returnOps) {
auto *state = solver.lookupState<ForwardMemoryActivity>(returnOp);
if (state)
errs() << "resulting forward state:\n" << *state << "\n";
errs() << "forward end state:\n" << *state << "\n";
else
errs() << "state was null\n";
}
Expand Down
1 change: 1 addition & 0 deletions enzyme/test/MLIR/ActivityAnalysis/allocator.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<f16, dense<16> :
}
// CHECK-LABEL: @_Z10reduce_maxPdi:
// CHECK: "arg0": Active
// CHECK: "allocator": Active
// CHECK: "loaded": Active
llvm.func @_Z10reduce_maxPdi(%arg0: f64 {enzyme.tag = "arg0"}) -> f64 {
%0 = llvm.mlir.constant(1 : i64) : i64
Expand Down
14 changes: 14 additions & 0 deletions enzyme/test/MLIR/ActivityAnalysis/allocret.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
// RUN: %eopt --print-activity-analysis --split-input-file %s 2>&1 | FileCheck %s

module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<i64, dense<[32, 64]> : vector<2xi32>>, #dlti.dl_entry<f16, dense<16> : vector<2xi32>>, #dlti.dl_entry<i32, dense<32> : vector<2xi32>>, #dlti.dl_entry<f128, dense<128> : vector<2xi32>>, #dlti.dl_entry<f64, dense<64> : vector<2xi32>>, #dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi32>>, #dlti.dl_entry<i8, dense<8> : vector<2xi32>>, #dlti.dl_entry<i16, dense<16> : vector<2xi32>>, #dlti.dl_entry<i1, dense<8> : vector<2xi32>>, #dlti.dl_entry<"dlti.endianness", "little">>} {
llvm.func @malloc(i64) -> !llvm.ptr
// The float argument is stored into a freshly malloc'd buffer which is then
// returned, so the allocation must be reported Active. This appears to
// exercise the initialization of return-value activity for pointer-typed
// return operands in the dense backward analysis — confirm against the
// DenseBackwardActivityAnalysis change in this commit.
// CHECK-LABEL: @kernel_main
// CHECK: "malloc": Active
llvm.func @kernel_main(%arg0: f32) -> !llvm.ptr {
%0 = llvm.mlir.constant(4 : i64) : i64
// Tagged so --print-activity-analysis reports this op's activity by name.
%1 = llvm.call @malloc(%0) {tag = "malloc"} : (i64) -> !llvm.ptr
%2 = llvm.bitcast %1 : !llvm.ptr to !llvm.ptr
llvm.store %arg0, %2 {alignment = 4 : i64} : f32, !llvm.ptr
llvm.return %2 : !llvm.ptr
}
}
19 changes: 19 additions & 0 deletions enzyme/test/MLIR/ActivityAnalysis/subld.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
// RUN: %eopt --print-activity-analysis --split-input-file %s 2>&1 | FileCheck %s

module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<i16, dense<16> : vector<2xi32>>, #dlti.dl_entry<i32, dense<32> : vector<2xi32>>, #dlti.dl_entry<i8, dense<8> : vector<2xi32>>, #dlti.dl_entry<i64, dense<[32, 64]> : vector<2xi32>>, #dlti.dl_entry<i1, dense<8> : vector<2xi32>>, #dlti.dl_entry<f16, dense<16> : vector<2xi32>>, #dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi32>>, #dlti.dl_entry<f64, dense<64> : vector<2xi32>>, #dlti.dl_entry<f128, dense<128> : vector<2xi32>>, #dlti.dl_entry<"dlti.endianness", "little">>} {
// Private helper that loads a pointer out of the pointer it is given and
// returns it; per its memory-effects attribute it only reads memory.
llvm.func @f(%arg0: !llvm.ptr {llvm.nocapture, llvm.readonly}) -> !llvm.ptr attributes {memory = #llvm.memory_effects<other = read, argMem = read, inaccessibleMem = read>, sym_visibility = "private"} {
%0 = llvm.load %arg0 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
llvm.return %0 : !llvm.ptr
}
// The incoming pointer is stashed in an alloca, recovered indirectly through
// the call to @f, and the f64 loaded through it becomes the return value —
// so both the alloca ("ptrtoptr") and the final load ("retval") are expected
// to be Active. Checks that activity propagates through a callee that
// returns a pointer derived from its argument.
// CHECK-LABEL: @submalloced
// CHECK: "ptrtoptr": Active
// CHECK: "retval": Active
llvm.func @submalloced(%arg0: !llvm.ptr) -> f64 {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64, tag = "ptrtoptr"} : (i32) -> !llvm.ptr
llvm.store %arg0, %1 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
%2 = llvm.call @f(%1) : (!llvm.ptr) -> !llvm.ptr
%3 = llvm.load %2 {alignment = 8 : i64, tag = "retval"} : !llvm.ptr -> f64
llvm.return %3 : f64
}
}

0 comments on commit d470b17

Please sign in to comment.