[flang][OpenMP] Share DataSharingProcessor instance for simd loops
For `!$omp target ...` constructs, we need to share the DataSharingProcessor (DSP) instance across the nested lowering steps so that the same variable is not privatized more than once. This was already the case for other variants of the construct, but the `simd` variants were missed and still created their own DSP instance. This commit fixes that.

This partially fixes: https://ontrack-internal.amd.com/browse/SWDEV-446525.
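To illustrate the idea, here is a minimal standalone sketch, not the actual flang lowering API: the types, member names, and helper signatures below are hypothetical simplifications. The point is that the enclosing target lowering builds one DataSharingProcessor-like object and hands the same instance to every nested helper, so a symbol listed in private(...) is privatized exactly once.

#include <iostream>
#include <set>
#include <string>

// Hypothetical stand-in for flang's DataSharingProcessor: it remembers which
// symbols were already privatized so a repeated request becomes a no-op.
struct DataSharingProcessor {
  std::set<std::string> privatized;
  void privatize(const std::string &sym) {
    if (privatized.insert(sym).second)
      std::cout << "privatizing " << sym << "\n";          // first time only
    else
      std::cout << "skipping already-privatized " << sym << "\n";
  }
};

// Before the fix, the simd-loop helper constructed its own local processor,
// so symbols already handled by the enclosing target region were privatized
// a second time. Taking the processor by reference avoids that.
void createSimdWsloop(DataSharingProcessor &dsp) {
  dsp.privatize("iv");
  dsp.privatize("var");   // already done by the target region: skipped
}

int main() {
  DataSharingProcessor dsp;   // created once for the whole combined construct
  dsp.privatize("var");       // privatized while lowering the target region
  createSimdWsloop(dsp);      // same instance shared with the simd loop
}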
ergawy committed Apr 25, 2024
2 parents 2a2be38 + b66e21f commit b8baaa8
Showing 2 changed files with 42 additions and 8 deletions.
17 changes: 9 additions & 8 deletions flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -2032,12 +2032,14 @@ static void createWsloop(Fortran::lower::AbstractConverter &converter,
   }
 }
 
-static void createSimdWsloop(
-    Fortran::lower::AbstractConverter &converter,
-    Fortran::semantics::SemanticsContext &semaCtx,
-    Fortran::lower::pft::Evaluation &eval, llvm::omp::Directive ompDirective,
-    const Fortran::parser::OmpClauseList &beginClauseList,
-    const Fortran::parser::OmpClauseList *endClauseList, mlir::Location loc) {
+static void
+createSimdWsloop(Fortran::lower::AbstractConverter &converter,
+                 Fortran::semantics::SemanticsContext &semaCtx,
+                 Fortran::lower::pft::Evaluation &eval,
+                 llvm::omp::Directive ompDirective,
+                 const Fortran::parser::OmpClauseList &beginClauseList,
+                 const Fortran::parser::OmpClauseList *endClauseList,
+                 mlir::Location loc, DataSharingProcessor &dsp) {
   ClauseProcessor cp(converter, semaCtx, beginClauseList);
   cp.processTODO<clause::Aligned, clause::Allocate, clause::Linear,
                  clause::Safelen, clause::Simdlen, clause::Order>(loc,
@@ -2050,7 +2052,6 @@ static void createSimdWsloop(
   // When support for vectorization is enabled, then we need to add handling of
   // if clause. Currently if clause can be skipped because we always assume
   // SIMD length = 1.
-  DataSharingProcessor dsp(converter, semaCtx, beginClauseList, eval);
   createWsloop(converter, semaCtx, eval, ompDirective, beginClauseList,
                endClauseList, loc, dsp);
 }
@@ -2501,7 +2502,7 @@ static void genOMP(Fortran::lower::AbstractConverter &converter,
   if (llvm::omp::allDoSimdSet.test(ompDirective)) {
     // 2.9.3.2 Workshare SIMD construct
     createSimdWsloop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
-                     endClauseList, currentLocation);
+                     endClauseList, currentLocation, dsp);
 
   } else if (llvm::omp::allSimdSet.test(ompDirective)) {
     // 2.9.3.1 SIMD construct
33 changes: 33 additions & 0 deletions flang/test/Lower/OpenMP/target_private.f90
@@ -28,3 +28,36 @@ subroutine omp_target_private
 !CHECK-NEXT: }
 
 end subroutine omp_target_private
+
+!CHECK-LABEL: func.func @_QPomp_target_target_do_simd()
+subroutine omp_target_target_do_simd()
+  implicit none
+
+  real(8) :: var
+  integer(8) :: iv
+
+  !$omp target teams distribute parallel do simd private(iv,var)
+  do iv=0,10
+    var = 3.14
+  end do
+  !$omp end target teams distribute parallel do simd
+
+!CHECK: omp.target trip_count
+!CHECK: fir.alloca f64 {bindc_name = "var", pinned
+!CHECK: omp.teams {
+!CHECK-NEXT: omp.distribute {
+!CHECK-NEXT: omp.parallel {
+!CHECK: fir.alloca i64
+!CHECK: omp.wsloop
+!CHECK: omp.yield
+!CHECK-NEXT: }
+!CHECK-NEXT: omp.terminator
+!CHECK-NEXT: }
+!CHECK-NEXT: omp.terminator
+!CHECK-NEXT: }
+!CHECK-NEXT: omp.terminator
+!CHECK-NEXT: }
+!CHECK-NEXT: omp.terminator
+!CHECK-NEXT: }
+
+end subroutine omp_target_target_do_simd
