From 7e97ae35ae2d1c38d149e670139a538bdba86e93 Mon Sep 17 00:00:00 2001 From: Yeting Kuo <46629943+yetingk@users.noreply.github.com> Date: Thu, 22 Feb 2024 15:51:19 +0800 Subject: [PATCH 01/19] [RISCV] Teach RISCVMakeCompressible handle Zca/Zcf/Zce/Zcd. (#81844) Make targets which don't have C but have Zca/Zcf/Zce/Zcd benefit from this pass. --- .../Target/RISCV/RISCVMakeCompressible.cpp | 31 +- llvm/lib/Target/RISCV/RISCVSubtarget.h | 4 + llvm/test/CodeGen/RISCV/make-compressible.mir | 499 +++++++++++++----- 3 files changed, 400 insertions(+), 134 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp index ff21fe1d406463..af864ba0fbc46f 100644 --- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp +++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp @@ -143,19 +143,35 @@ static bool isCompressedReg(Register Reg) { // Return true if MI is a load for which there exists a compressed version. static bool isCompressibleLoad(const MachineInstr &MI) { const RISCVSubtarget &STI = MI.getMF()->getSubtarget(); - const unsigned Opcode = MI.getOpcode(); - return Opcode == RISCV::LW || (!STI.is64Bit() && Opcode == RISCV::FLW) || - Opcode == RISCV::LD || Opcode == RISCV::FLD; + switch (MI.getOpcode()) { + default: + return false; + case RISCV::LW: + case RISCV::LD: + return STI.hasStdExtCOrZca(); + case RISCV::FLW: + return !STI.is64Bit() && STI.hasStdExtCOrZcfOrZce(); + case RISCV::FLD: + return STI.hasStdExtCOrZcd(); + } } // Return true if MI is a store for which there exists a compressed version. static bool isCompressibleStore(const MachineInstr &MI) { const RISCVSubtarget &STI = MI.getMF()->getSubtarget(); - const unsigned Opcode = MI.getOpcode(); - return Opcode == RISCV::SW || (!STI.is64Bit() && Opcode == RISCV::FSW) || - Opcode == RISCV::SD || Opcode == RISCV::FSD; + switch (MI.getOpcode()) { + default: + return false; + case RISCV::SW: + case RISCV::SD: + return STI.hasStdExtCOrZca(); + case RISCV::FSW: + return !STI.is64Bit() && STI.hasStdExtCOrZcfOrZce(); + case RISCV::FSD: + return STI.hasStdExtCOrZcd(); + } } // Find a single register and/or large offset which, if compressible, would @@ -324,8 +340,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) { const RISCVInstrInfo &TII = *STI.getInstrInfo(); // This optimization only makes sense if compressed instructions are emitted. - // FIXME: Support Zca, Zcf, Zcd granularity. 
- if (!STI.hasStdExtC()) + if (!STI.hasStdExtCOrZca()) return false; for (MachineBasicBlock &MBB : Fn) { diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index 4b60d7aff22a0f..9ebf278d6749f0 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -143,6 +143,10 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo { #include "RISCVGenSubtargetInfo.inc" bool hasStdExtCOrZca() const { return HasStdExtC || HasStdExtZca; } + bool hasStdExtCOrZcd() const { return HasStdExtC || HasStdExtZcd; } + bool hasStdExtCOrZcfOrZce() const { + return HasStdExtC || HasStdExtZcf || HasStdExtZce; + } bool hasStdExtZvl() const { return ZvlLen != 0; } bool hasStdExtFOrZfinx() const { return HasStdExtF || HasStdExtZfinx; } bool hasStdExtDOrZdinx() const { return HasStdExtD || HasStdExtZdinx; } diff --git a/llvm/test/CodeGen/RISCV/make-compressible.mir b/llvm/test/CodeGen/RISCV/make-compressible.mir index 2105a13bc8c7b7..03da38a6863e71 100644 --- a/llvm/test/CodeGen/RISCV/make-compressible.mir +++ b/llvm/test/CodeGen/RISCV/make-compressible.mir @@ -1,8 +1,14 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -o - %s -mtriple=riscv32 -mattr=+c,+f,+d -simplify-mir \ -# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefix=RV32 %s +# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV32,RV32C %s # RUN: llc -o - %s -mtriple=riscv64 -mattr=+c,+f,+d -simplify-mir \ -# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefix=RV64 %s +# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV64,RV64C %s +# RUN: llc -o - %s -mtriple=riscv32 -mattr=+d,+zcf -simplify-mir \ +# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV32,RV32ZCF %s +# RUN: llc -o - %s -mtriple=riscv32 -mattr=+d,+zca -simplify-mir \ +# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV32,RV32ZCA %s +# RUN: llc -o - %s -mtriple=riscv64 -mattr=+d,+zca -simplify-mir \ +# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV64,RV64ZCA %s --- | define void @store_common_value(ptr %a, ptr %b, ptr %c) #0 { @@ -288,7 +294,7 @@ ret { double, double } %3 } - attributes #0 = { minsize "target-features"="+c,+f,+d" } + attributes #0 = { minsize } ... 
--- @@ -306,6 +312,7 @@ body: | ; RV32-NEXT: SW $x13, killed renamable $x11, 0 :: (store (s32) into %ir.b) ; RV32-NEXT: SW $x13, killed renamable $x12, 0 :: (store (s32) into %ir.c) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_value ; RV64: liveins: $x10, $x11, $x12 ; RV64-NEXT: {{ $}} @@ -327,14 +334,15 @@ body: | bb.0.entry: liveins: $x10, $x11, $x12, $f16_f - ; RV32-LABEL: name: store_common_value_float - ; RV32: liveins: $x10, $x11, $x12, $f16_f - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f - ; RV32-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a) - ; RV32-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b) - ; RV32-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c) - ; RV32-NEXT: PseudoRET + ; RV32C-LABEL: name: store_common_value_float + ; RV32C: liveins: $x10, $x11, $x12, $f16_f + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f + ; RV32C-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a) + ; RV32C-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b) + ; RV32C-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c) + ; RV32C-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_value_float ; RV64: liveins: $x10, $x11, $x12, $f16_f ; RV64-NEXT: {{ $}} @@ -342,6 +350,23 @@ body: | ; RV64-NEXT: FSW renamable $f16_f, killed renamable $x11, 0 :: (store (s32) into %ir.b) ; RV64-NEXT: FSW killed renamable $f16_f, killed renamable $x12, 0 :: (store (s32) into %ir.c) ; RV64-NEXT: PseudoRET + ; + ; RV32ZCF-LABEL: name: store_common_value_float + ; RV32ZCF: liveins: $x10, $x11, $x12, $f16_f + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f + ; RV32ZCF-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a) + ; RV32ZCF-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b) + ; RV32ZCF-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c) + ; RV32ZCF-NEXT: PseudoRET + ; + ; RV32ZCA-LABEL: name: store_common_value_float + ; RV32ZCA: liveins: $x10, $x11, $x12, $f16_f + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: FSW renamable $f16_f, killed renamable $x10, 0 :: (store (s32) into %ir.a) + ; RV32ZCA-NEXT: FSW renamable $f16_f, killed renamable $x11, 0 :: (store (s32) into %ir.b) + ; RV32ZCA-NEXT: FSW killed renamable $f16_f, killed renamable $x12, 0 :: (store (s32) into %ir.c) + ; RV32ZCA-NEXT: PseudoRET FSW renamable $f16_f, killed renamable $x10, 0 :: (store (s32) into %ir.a) FSW renamable $f16_f, killed renamable $x11, 0 :: (store (s32) into %ir.b) FSW killed renamable $f16_f, killed renamable $x12, 0 :: (store (s32) into %ir.c) @@ -355,22 +380,47 @@ body: | bb.0.entry: liveins: $x10, $x11, $x12, $f16_d - ; RV32-LABEL: name: store_common_value_double - ; RV32: liveins: $x10, $x11, $x12, $f16_d - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d - ; RV32-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) - ; RV32-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) - ; RV32-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) - ; RV32-NEXT: PseudoRET - ; RV64-LABEL: name: store_common_value_double - ; RV64: liveins: $x10, $x11, $x12, $f16_d - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d - ; RV64-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) - ; RV64-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into 
%ir.b) - ; RV64-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) - ; RV64-NEXT: PseudoRET + ; RV32C-LABEL: name: store_common_value_double + ; RV32C: liveins: $x10, $x11, $x12, $f16_d + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d + ; RV32C-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) + ; RV32C-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) + ; RV32C-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) + ; RV32C-NEXT: PseudoRET + ; + ; RV64C-LABEL: name: store_common_value_double + ; RV64C: liveins: $x10, $x11, $x12, $f16_d + ; RV64C-NEXT: {{ $}} + ; RV64C-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d + ; RV64C-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) + ; RV64C-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) + ; RV64C-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) + ; RV64C-NEXT: PseudoRET + ; + ; RV32ZCF-LABEL: name: store_common_value_double + ; RV32ZCF: liveins: $x10, $x11, $x12, $f16_d + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) + ; RV32ZCF-NEXT: FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) + ; RV32ZCF-NEXT: FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) + ; RV32ZCF-NEXT: PseudoRET + ; + ; RV32ZCA-LABEL: name: store_common_value_double + ; RV32ZCA: liveins: $x10, $x11, $x12, $f16_d + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) + ; RV32ZCA-NEXT: FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) + ; RV32ZCA-NEXT: FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) + ; RV32ZCA-NEXT: PseudoRET + ; + ; RV64ZCA-LABEL: name: store_common_value_double + ; RV64ZCA: liveins: $x10, $x11, $x12, $f16_d + ; RV64ZCA-NEXT: {{ $}} + ; RV64ZCA-NEXT: FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) + ; RV64ZCA-NEXT: FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) + ; RV64ZCA-NEXT: FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) + ; RV64ZCA-NEXT: PseudoRET FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b) FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c) @@ -395,6 +445,7 @@ body: | ; RV32-NEXT: renamable $x10 = ADDI $x0, 5 ; RV32-NEXT: SW killed renamable $x10, killed $x11, 0 :: (volatile store (s32) into %ir.p) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_ptr ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -432,6 +483,7 @@ body: | ; RV32-NEXT: SW killed renamable $x10, $x11, 0 :: (volatile store (s32) into %ir.p) ; RV32-NEXT: SW killed $x11, $x11, 0 :: (volatile store (s32) into %ir.q) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_ptr_self ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -457,14 +509,15 @@ body: | bb.0.entry: liveins: $x16, $f10_f, $f11_f, $f12_f - ; RV32-LABEL: name: store_common_ptr_float - ; RV32: liveins: $x16, $f10_f, $f11_f, $f12_f - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x10 = ADDI $x16, 0 - ; RV32-NEXT: FSW killed renamable $f10_f, $x10, 0 :: (volatile store (s32) into %ir.p) - ; RV32-NEXT: FSW killed renamable $f11_f, $x10, 0 :: (volatile store (s32) into %ir.p) - ; RV32-NEXT: FSW 
killed renamable $f12_f, killed $x10, 0 :: (volatile store (s32) into %ir.p) - ; RV32-NEXT: PseudoRET + ; RV32C-LABEL: name: store_common_ptr_float + ; RV32C: liveins: $x16, $f10_f, $f11_f, $f12_f + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x10 = ADDI $x16, 0 + ; RV32C-NEXT: FSW killed renamable $f10_f, $x10, 0 :: (volatile store (s32) into %ir.p) + ; RV32C-NEXT: FSW killed renamable $f11_f, $x10, 0 :: (volatile store (s32) into %ir.p) + ; RV32C-NEXT: FSW killed renamable $f12_f, killed $x10, 0 :: (volatile store (s32) into %ir.p) + ; RV32C-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_ptr_float ; RV64: liveins: $x16, $f10_f, $f11_f, $f12_f ; RV64-NEXT: {{ $}} @@ -472,6 +525,23 @@ body: | ; RV64-NEXT: FSW killed renamable $f11_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p) ; RV64-NEXT: FSW killed renamable $f12_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p) ; RV64-NEXT: PseudoRET + ; + ; RV32ZCF-LABEL: name: store_common_ptr_float + ; RV32ZCF: liveins: $x16, $f10_f, $f11_f, $f12_f + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: $x10 = ADDI $x16, 0 + ; RV32ZCF-NEXT: FSW killed renamable $f10_f, $x10, 0 :: (volatile store (s32) into %ir.p) + ; RV32ZCF-NEXT: FSW killed renamable $f11_f, $x10, 0 :: (volatile store (s32) into %ir.p) + ; RV32ZCF-NEXT: FSW killed renamable $f12_f, killed $x10, 0 :: (volatile store (s32) into %ir.p) + ; RV32ZCF-NEXT: PseudoRET + ; + ; RV32ZCA-LABEL: name: store_common_ptr_float + ; RV32ZCA: liveins: $x16, $f10_f, $f11_f, $f12_f + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: FSW killed renamable $f10_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p) + ; RV32ZCA-NEXT: FSW killed renamable $f11_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p) + ; RV32ZCA-NEXT: FSW killed renamable $f12_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p) + ; RV32ZCA-NEXT: PseudoRET FSW killed renamable $f10_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p) FSW killed renamable $f11_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p) FSW killed renamable $f12_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p) @@ -485,22 +555,47 @@ body: | bb.0.entry: liveins: $x16, $f10_d, $f11_d, $f12_d - ; RV32-LABEL: name: store_common_ptr_double - ; RV32: liveins: $x16, $f10_d, $f11_d, $f12_d - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x10 = ADDI $x16, 0 - ; RV32-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p) - ; RV32-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p) - ; RV32-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p) - ; RV32-NEXT: PseudoRET - ; RV64-LABEL: name: store_common_ptr_double - ; RV64: liveins: $x16, $f10_d, $f11_d, $f12_d - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: $x10 = ADDI $x16, 0 - ; RV64-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p) - ; RV64-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p) - ; RV64-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p) - ; RV64-NEXT: PseudoRET + ; RV32C-LABEL: name: store_common_ptr_double + ; RV32C: liveins: $x16, $f10_d, $f11_d, $f12_d + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x10 = ADDI $x16, 0 + ; RV32C-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p) + ; RV32C-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p) + ; RV32C-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p) + ; 
RV32C-NEXT: PseudoRET + ; + ; RV64C-LABEL: name: store_common_ptr_double + ; RV64C: liveins: $x16, $f10_d, $f11_d, $f12_d + ; RV64C-NEXT: {{ $}} + ; RV64C-NEXT: $x10 = ADDI $x16, 0 + ; RV64C-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p) + ; RV64C-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p) + ; RV64C-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p) + ; RV64C-NEXT: PseudoRET + ; + ; RV32ZCF-LABEL: name: store_common_ptr_double + ; RV32ZCF: liveins: $x16, $f10_d, $f11_d, $f12_d + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV32ZCF-NEXT: FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV32ZCF-NEXT: FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV32ZCF-NEXT: PseudoRET + ; + ; RV32ZCA-LABEL: name: store_common_ptr_double + ; RV32ZCA: liveins: $x16, $f10_d, $f11_d, $f12_d + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV32ZCA-NEXT: FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV32ZCA-NEXT: FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV32ZCA-NEXT: PseudoRET + ; + ; RV64ZCA-LABEL: name: store_common_ptr_double + ; RV64ZCA: liveins: $x16, $f10_d, $f11_d, $f12_d + ; RV64ZCA-NEXT: {{ $}} + ; RV64ZCA-NEXT: FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV64ZCA-NEXT: FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV64ZCA-NEXT: FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p) + ; RV64ZCA-NEXT: PseudoRET FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p) FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p) @@ -522,6 +617,7 @@ body: | ; RV32-NEXT: dead renamable $x10 = LW $x11, 0 :: (volatile load (s32) from %ir.p) ; RV32-NEXT: dead renamable $x10 = LW killed $x11, 0 :: (volatile load (s32) from %ir.p) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: load_common_ptr ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -543,14 +639,15 @@ body: | bb.0.entry: liveins: $x16 - ; RV32-LABEL: name: load_common_ptr_float - ; RV32: liveins: $x16 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x10 = ADDI $x16, 0 - ; RV32-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g) - ; RV32-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1) - ; RV32-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2) - ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; RV32C-LABEL: name: load_common_ptr_float + ; RV32C: liveins: $x16 + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x10 = ADDI $x16, 0 + ; RV32C-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g) + ; RV32C-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1) + ; RV32C-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2) + ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; ; RV64-LABEL: 
name: load_common_ptr_float ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -558,6 +655,23 @@ body: | ; RV64-NEXT: renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1) ; RV64-NEXT: renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2) ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; + ; RV32ZCF-LABEL: name: load_common_ptr_float + ; RV32ZCF: liveins: $x16 + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: $x10 = ADDI $x16, 0 + ; RV32ZCF-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g) + ; RV32ZCF-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1) + ; RV32ZCF-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2) + ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; + ; RV32ZCA-LABEL: name: load_common_ptr_float + ; RV32ZCA: liveins: $x16 + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: renamable $f10_f = FLW renamable $x16, 0 :: (load (s32) from %ir.g) + ; RV32ZCA-NEXT: renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1) + ; RV32ZCA-NEXT: renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2) + ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f renamable $f10_f = FLW renamable $x16, 0 :: (load (s32) from %ir.g) renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1) renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2) @@ -571,22 +685,47 @@ body: | bb.0.entry: liveins: $x16 - ; RV32-LABEL: name: load_common_ptr_double - ; RV32: liveins: $x16 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x10 = ADDI $x16, 0 - ; RV32-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g) - ; RV32-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1) - ; RV32-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2) - ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d - ; RV64-LABEL: name: load_common_ptr_double - ; RV64: liveins: $x16 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: $x10 = ADDI $x16, 0 - ; RV64-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g) - ; RV64-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1) - ; RV64-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2) - ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; RV32C-LABEL: name: load_common_ptr_double + ; RV32C: liveins: $x16 + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x10 = ADDI $x16, 0 + ; RV32C-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g) + ; RV32C-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1) + ; RV32C-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2) + ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV64C-LABEL: name: load_common_ptr_double + ; RV64C: liveins: $x16 + ; RV64C-NEXT: {{ $}} + ; RV64C-NEXT: $x10 = ADDI $x16, 0 + ; RV64C-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g) + ; RV64C-NEXT: renamable $f11_d = 
FLD $x10, 8 :: (load (s64) from %ir.arrayidx1) + ; RV64C-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2) + ; RV64C-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV32ZCF-LABEL: name: load_common_ptr_double + ; RV32ZCF: liveins: $x16 + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g) + ; RV32ZCF-NEXT: renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1) + ; RV32ZCF-NEXT: renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2) + ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV32ZCA-LABEL: name: load_common_ptr_double + ; RV32ZCA: liveins: $x16 + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g) + ; RV32ZCA-NEXT: renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1) + ; RV32ZCA-NEXT: renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2) + ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV64ZCA-LABEL: name: load_common_ptr_double + ; RV64ZCA: liveins: $x16 + ; RV64ZCA-NEXT: {{ $}} + ; RV64ZCA-NEXT: renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g) + ; RV64ZCA-NEXT: renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1) + ; RV64ZCA-NEXT: renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2) + ; RV64ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g) renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1) renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2) @@ -613,6 +752,7 @@ body: | ; RV32-NEXT: renamable $x11 = ADDI $x0, 7 ; RV32-NEXT: SW killed renamable $x11, killed $x12, 28 :: (volatile store (s32) into %ir.3) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_large_offset ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -644,15 +784,16 @@ body: | bb.0.entry: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f - ; RV32-LABEL: name: store_large_offset_float - ; RV32: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x11 = ADDI $x10, 384 - ; RV32-NEXT: FSW killed renamable $f10_f, $x11, 16 :: (volatile store (s32) into %ir.0) - ; RV32-NEXT: FSW killed renamable $f11_f, $x11, 20 :: (volatile store (s32) into %ir.1) - ; RV32-NEXT: FSW killed renamable $f12_f, $x11, 24 :: (volatile store (s32) into %ir.2) - ; RV32-NEXT: FSW killed renamable $f13_f, killed $x11, 28 :: (volatile store (s32) into %ir.3) - ; RV32-NEXT: PseudoRET + ; RV32C-LABEL: name: store_large_offset_float + ; RV32C: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x11 = ADDI $x10, 384 + ; RV32C-NEXT: FSW killed renamable $f10_f, $x11, 16 :: (volatile store (s32) into %ir.0) + ; RV32C-NEXT: FSW killed renamable $f11_f, $x11, 20 :: (volatile store (s32) into %ir.1) + ; RV32C-NEXT: FSW killed renamable $f12_f, $x11, 24 :: (volatile store (s32) into %ir.2) + ; RV32C-NEXT: FSW killed renamable $f13_f, killed $x11, 28 :: (volatile store (s32) into %ir.3) + ; 
RV32C-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_large_offset_float ; RV64: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f ; RV64-NEXT: {{ $}} @@ -661,6 +802,25 @@ body: | ; RV64-NEXT: FSW killed renamable $f12_f, renamable $x10, 408 :: (volatile store (s32) into %ir.2) ; RV64-NEXT: FSW killed renamable $f13_f, killed renamable $x10, 412 :: (volatile store (s32) into %ir.3) ; RV64-NEXT: PseudoRET + ; + ; RV32ZCF-LABEL: name: store_large_offset_float + ; RV32ZCF: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: $x11 = ADDI $x10, 384 + ; RV32ZCF-NEXT: FSW killed renamable $f10_f, $x11, 16 :: (volatile store (s32) into %ir.0) + ; RV32ZCF-NEXT: FSW killed renamable $f11_f, $x11, 20 :: (volatile store (s32) into %ir.1) + ; RV32ZCF-NEXT: FSW killed renamable $f12_f, $x11, 24 :: (volatile store (s32) into %ir.2) + ; RV32ZCF-NEXT: FSW killed renamable $f13_f, killed $x11, 28 :: (volatile store (s32) into %ir.3) + ; RV32ZCF-NEXT: PseudoRET + ; + ; RV32ZCA-LABEL: name: store_large_offset_float + ; RV32ZCA: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: FSW killed renamable $f10_f, renamable $x10, 400 :: (volatile store (s32) into %ir.0) + ; RV32ZCA-NEXT: FSW killed renamable $f11_f, renamable $x10, 404 :: (volatile store (s32) into %ir.1) + ; RV32ZCA-NEXT: FSW killed renamable $f12_f, renamable $x10, 408 :: (volatile store (s32) into %ir.2) + ; RV32ZCA-NEXT: FSW killed renamable $f13_f, killed renamable $x10, 412 :: (volatile store (s32) into %ir.3) + ; RV32ZCA-NEXT: PseudoRET FSW killed renamable $f10_f, renamable $x10, 400 :: (volatile store (s32) into %ir.0) FSW killed renamable $f11_f, renamable $x10, 404 :: (volatile store (s32) into %ir.1) FSW killed renamable $f12_f, renamable $x10, 408 :: (volatile store (s32) into %ir.2) @@ -675,24 +835,52 @@ body: | bb.0.entry: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d - ; RV32-LABEL: name: store_large_offset_double - ; RV32: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x11 = ADDI $x10, 768 - ; RV32-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0) - ; RV32-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1) - ; RV32-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2) - ; RV32-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3) - ; RV32-NEXT: PseudoRET - ; RV64-LABEL: name: store_large_offset_double - ; RV64: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: $x11 = ADDI $x10, 768 - ; RV64-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0) - ; RV64-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1) - ; RV64-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2) - ; RV64-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3) - ; RV64-NEXT: PseudoRET + ; RV32C-LABEL: name: store_large_offset_double + ; RV32C: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x11 = ADDI $x10, 768 + ; RV32C-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0) + ; RV32C-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1) + ; RV32C-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2) + ; RV32C-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store 
(s64) into %ir.3) + ; RV32C-NEXT: PseudoRET + ; + ; RV64C-LABEL: name: store_large_offset_double + ; RV64C: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d + ; RV64C-NEXT: {{ $}} + ; RV64C-NEXT: $x11 = ADDI $x10, 768 + ; RV64C-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0) + ; RV64C-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1) + ; RV64C-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2) + ; RV64C-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3) + ; RV64C-NEXT: PseudoRET + ; + ; RV32ZCF-LABEL: name: store_large_offset_double + ; RV32ZCF: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0) + ; RV32ZCF-NEXT: FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1) + ; RV32ZCF-NEXT: FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2) + ; RV32ZCF-NEXT: FSD killed renamable $f13_d, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3) + ; RV32ZCF-NEXT: PseudoRET + ; + ; RV32ZCA-LABEL: name: store_large_offset_double + ; RV32ZCA: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0) + ; RV32ZCA-NEXT: FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1) + ; RV32ZCA-NEXT: FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2) + ; RV32ZCA-NEXT: FSD killed renamable $f13_d, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3) + ; RV32ZCA-NEXT: PseudoRET + ; + ; RV64ZCA-LABEL: name: store_large_offset_double + ; RV64ZCA: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d + ; RV64ZCA-NEXT: {{ $}} + ; RV64ZCA-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0) + ; RV64ZCA-NEXT: FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1) + ; RV64ZCA-NEXT: FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2) + ; RV64ZCA-NEXT: FSD killed renamable $f13_d, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3) + ; RV64ZCA-NEXT: PseudoRET FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0) FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1) FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2) @@ -716,6 +904,7 @@ body: | ; RV32-NEXT: dead renamable $x11 = LW $x12, 24 :: (volatile load (s32) from %ir.2) ; RV32-NEXT: dead renamable $x10 = LW killed $x12, 28 :: (volatile load (s32) from %ir.3) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: load_large_offset ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -739,14 +928,15 @@ body: | bb.0.entry: liveins: $x10 - ; RV32-LABEL: name: load_large_offset_float - ; RV32: liveins: $x10 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x11 = ADDI $x10, 384 - ; RV32-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx) - ; RV32-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1) - ; RV32-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2) - ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; RV32C-LABEL: name: 
load_large_offset_float + ; RV32C: liveins: $x10 + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x11 = ADDI $x10, 384 + ; RV32C-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx) + ; RV32C-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1) + ; RV32C-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2) + ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; ; RV64-LABEL: name: load_large_offset_float ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -754,6 +944,23 @@ body: | ; RV64-NEXT: renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1) ; RV64-NEXT: renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2) ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; + ; RV32ZCF-LABEL: name: load_large_offset_float + ; RV32ZCF: liveins: $x10 + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: $x11 = ADDI $x10, 384 + ; RV32ZCF-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx) + ; RV32ZCF-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1) + ; RV32ZCF-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2) + ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f + ; + ; RV32ZCA-LABEL: name: load_large_offset_float + ; RV32ZCA: liveins: $x10 + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx) + ; RV32ZCA-NEXT: renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1) + ; RV32ZCA-NEXT: renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2) + ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx) renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1) renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2) @@ -767,22 +974,47 @@ body: | bb.0.entry: liveins: $x10 - ; RV32-LABEL: name: load_large_offset_double - ; RV32: liveins: $x10 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: $x11 = ADDI $x10, 768 - ; RV32-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx) - ; RV32-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1) - ; RV32-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2) - ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d - ; RV64-LABEL: name: load_large_offset_double - ; RV64: liveins: $x10 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: $x11 = ADDI $x10, 768 - ; RV64-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx) - ; RV64-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1) - ; RV64-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2) - ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; RV32C-LABEL: name: load_large_offset_double + ; RV32C: liveins: $x10 + ; RV32C-NEXT: {{ $}} + ; RV32C-NEXT: $x11 = ADDI $x10, 768 + ; RV32C-NEXT: 
renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx) + ; RV32C-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1) + ; RV32C-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2) + ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV64C-LABEL: name: load_large_offset_double + ; RV64C: liveins: $x10 + ; RV64C-NEXT: {{ $}} + ; RV64C-NEXT: $x11 = ADDI $x10, 768 + ; RV64C-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx) + ; RV64C-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1) + ; RV64C-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2) + ; RV64C-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV32ZCF-LABEL: name: load_large_offset_double + ; RV32ZCF: liveins: $x10 + ; RV32ZCF-NEXT: {{ $}} + ; RV32ZCF-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx) + ; RV32ZCF-NEXT: renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1) + ; RV32ZCF-NEXT: renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2) + ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV32ZCA-LABEL: name: load_large_offset_double + ; RV32ZCA: liveins: $x10 + ; RV32ZCA-NEXT: {{ $}} + ; RV32ZCA-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx) + ; RV32ZCA-NEXT: renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1) + ; RV32ZCA-NEXT: renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2) + ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d + ; + ; RV64ZCA-LABEL: name: load_large_offset_double + ; RV64ZCA: liveins: $x10 + ; RV64ZCA-NEXT: {{ $}} + ; RV64ZCA-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx) + ; RV64ZCA-NEXT: renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1) + ; RV64ZCA-NEXT: renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2) + ; RV64ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx) renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1) renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2) @@ -801,6 +1033,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: SW $x0, killed renamable $x10, 0 :: (store (s32) into %ir.a) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_value_no_opt ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -822,6 +1055,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: FSW killed renamable $f16_f, killed renamable $x10, 0 :: (store (s32) into %ir.a) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_value_float_no_opt ; RV64: liveins: $x10, $f16_f ; RV64-NEXT: {{ $}} @@ -843,6 +1077,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: FSD killed renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_value_double_no_opt ; RV64: liveins: 
$x10, $f16_d ; RV64-NEXT: {{ $}} @@ -865,6 +1100,7 @@ body: | ; RV32-NEXT: renamable $x10 = ADDI $x0, 1 ; RV32-NEXT: SW killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_ptr_no_opt ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -888,6 +1124,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: FSW killed renamable $f10_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_ptr_float_no_opt ; RV64: liveins: $x16, $f10_f ; RV64-NEXT: {{ $}} @@ -909,6 +1146,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: FSD killed renamable $f10_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_common_ptr_double_no_opt ; RV64: liveins: $x16, $f10_d ; RV64-NEXT: {{ $}} @@ -930,6 +1168,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: dead renamable $x10 = LW killed renamable $x16, 0 :: (volatile load (s32) from %ir.p) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: load_common_ptr_no_opt ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -951,6 +1190,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: renamable $f10_f = FLW killed renamable $x16, 0 :: (load (s32) from %ir.g) ; RV32-NEXT: PseudoRET implicit $f10_f + ; ; RV64-LABEL: name: load_common_ptr_float_no_opt ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -972,6 +1212,7 @@ body: | ; RV32-NEXT: {{ $}} ; RV32-NEXT: renamable $f10_d = FLD killed renamable $x16, 0 :: (load (s64) from %ir.g) ; RV32-NEXT: PseudoRET implicit $f10_d + ; ; RV64-LABEL: name: load_common_ptr_double_no_opt ; RV64: liveins: $x16 ; RV64-NEXT: {{ $}} @@ -996,6 +1237,7 @@ body: | ; RV32-NEXT: renamable $x11 = ADDI $x0, 3 ; RV32-NEXT: SW killed renamable $x11, killed renamable $x10, 404 :: (volatile store (s32) into %ir.1) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_large_offset_no_opt ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -1024,6 +1266,7 @@ body: | ; RV32-NEXT: FSW killed renamable $f10_f, renamable $x10, 400 :: (volatile store (s32) into %ir.0) ; RV32-NEXT: FSW killed renamable $f11_f, killed renamable $x10, 404 :: (volatile store (s32) into %ir.1) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_large_offset_float_no_opt ; RV64: liveins: $x10, $f10_f, $f11_f ; RV64-NEXT: {{ $}} @@ -1048,6 +1291,7 @@ body: | ; RV32-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0) ; RV32-NEXT: FSD killed renamable $f11_d, killed renamable $x10, 808 :: (volatile store (s64) into %ir.1) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: store_large_offset_double_no_opt ; RV64: liveins: $x10, $f10_d, $f11_d ; RV64-NEXT: {{ $}} @@ -1072,6 +1316,7 @@ body: | ; RV32-NEXT: dead renamable $x11 = LW renamable $x10, 400 :: (volatile load (s32) from %ir.0) ; RV32-NEXT: dead renamable $x10 = LW killed renamable $x10, 404 :: (volatile load (s32) from %ir.1) ; RV32-NEXT: PseudoRET + ; ; RV64-LABEL: name: load_large_offset_no_opt ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -1096,6 +1341,7 @@ body: | ; RV32-NEXT: renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx) ; RV32-NEXT: renamable $f11_f = FLW killed renamable $x10, 404 :: (load (s32) from %ir.arrayidx1) ; RV32-NEXT: PseudoRET implicit $f10_f, implicit $f11_f + ; ; RV64-LABEL: name: load_large_offset_float_no_opt ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} @@ -1120,6 +1366,7 @@ body: | ; RV32-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx) ; 
RV32-NEXT: renamable $f11_d = FLD killed renamable $x10, 808 :: (load (s64) from %ir.arrayidx1) ; RV32-NEXT: PseudoRET implicit $f10_d, implicit $f11_d + ; ; RV64-LABEL: name: load_large_offset_double_no_opt ; RV64: liveins: $x10 ; RV64-NEXT: {{ $}} From edd4aee4dd9b5b98b2576a6f783e4086173d902a Mon Sep 17 00:00:00 2001 From: Luke Lau Date: Thu, 22 Feb 2024 15:57:57 +0800 Subject: [PATCH 02/19] [RISCV] Compute integers once in isSimpleVIDSequence. NFCI (#82590) We need to iterate through the integers twice in isSimpleVIDSequence, so instead of computing them twice just compute them once at the start. This also replaces the individual checks that each element is constant with a single call to BuildVectorSDNode::isConstant. --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 64 ++++++++++----------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 75be97ff32bbe5..cf0dc36a51b61b 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -3242,44 +3242,47 @@ static std::optional getExactInteger(const APFloat &APF, // determine whether this is worth generating code for. static std::optional isSimpleVIDSequence(SDValue Op, unsigned EltSizeInBits) { - unsigned NumElts = Op.getNumOperands(); assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR"); + if (!cast(Op)->isConstant()) + return std::nullopt; bool IsInteger = Op.getValueType().isInteger(); std::optional SeqStepDenom; std::optional SeqStepNum, SeqAddend; std::optional> PrevElt; assert(EltSizeInBits >= Op.getValueType().getScalarSizeInBits()); - for (unsigned Idx = 0; Idx < NumElts; Idx++) { - // Assume undef elements match the sequence; we just have to be careful - // when interpolating across them. - if (Op.getOperand(Idx).isUndef()) - continue; - uint64_t Val; + // First extract the ops into a list of constant integer values. This may not + // be possible for floats if they're not all representable as integers. + SmallVector> Elts(Op.getNumOperands()); + const unsigned OpSize = Op.getScalarValueSizeInBits(); + for (auto [Idx, Elt] : enumerate(Op->op_values())) { + if (Elt.isUndef()) { + Elts[Idx] = std::nullopt; + continue; + } if (IsInteger) { - // The BUILD_VECTOR must be all constants. - if (!isa(Op.getOperand(Idx))) - return std::nullopt; - Val = Op.getConstantOperandVal(Idx) & - maskTrailingOnes(Op.getScalarValueSizeInBits()); + Elts[Idx] = Elt->getAsZExtVal() & maskTrailingOnes(OpSize); } else { - // The BUILD_VECTOR must be all constants. - if (!isa(Op.getOperand(Idx))) - return std::nullopt; - if (auto ExactInteger = getExactInteger( - cast(Op.getOperand(Idx))->getValueAPF(), - Op.getScalarValueSizeInBits())) - Val = *ExactInteger; - else + auto ExactInteger = + getExactInteger(cast(Elt)->getValueAPF(), OpSize); + if (!ExactInteger) return std::nullopt; + Elts[Idx] = *ExactInteger; } + } + + for (auto [Idx, Elt] : enumerate(Elts)) { + // Assume undef elements match the sequence; we just have to be careful + // when interpolating across them. + if (!Elt) + continue; if (PrevElt) { // Calculate the step since the last non-undef element, and ensure // it's consistent across the entire sequence. 
unsigned IdxDiff = Idx - PrevElt->second; - int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits); + int64_t ValDiff = SignExtend64(*Elt - PrevElt->first, EltSizeInBits); // A zero-value value difference means that we're somewhere in the middle // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a @@ -3309,8 +3312,8 @@ static std::optional isSimpleVIDSequence(SDValue Op, } // Record this non-undef element for later. - if (!PrevElt || PrevElt->first != Val) - PrevElt = std::make_pair(Val, Idx); + if (!PrevElt || PrevElt->first != *Elt) + PrevElt = std::make_pair(*Elt, Idx); } // We need to have logged a step for this to count as a legal index sequence. @@ -3319,21 +3322,12 @@ static std::optional isSimpleVIDSequence(SDValue Op, // Loop back through the sequence and validate elements we might have skipped // while waiting for a valid step. While doing this, log any sequence addend. - for (unsigned Idx = 0; Idx < NumElts; Idx++) { - if (Op.getOperand(Idx).isUndef()) + for (auto [Idx, Elt] : enumerate(Elts)) { + if (!Elt) continue; - uint64_t Val; - if (IsInteger) { - Val = Op.getConstantOperandVal(Idx) & - maskTrailingOnes(Op.getScalarValueSizeInBits()); - } else { - Val = *getExactInteger( - cast(Op.getOperand(Idx))->getValueAPF(), - Op.getScalarValueSizeInBits()); - } uint64_t ExpectedVal = (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom; - int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits); + int64_t Addend = SignExtend64(*Elt - ExpectedVal, EltSizeInBits); if (!SeqAddend) SeqAddend = Addend; else if (Addend != SeqAddend) From e899641df2391179e8ec29ca14c53b09ae7ce85c Mon Sep 17 00:00:00 2001 From: martinboehme Date: Thu, 22 Feb 2024 09:00:20 +0100 Subject: [PATCH 03/19] [clang][dataflow] Fix inaccuracies in `buildStmtToBasicBlockMap()`. (#82496) See the comments added to the code for details on the inaccuracies that have now been fixed. The patch adds tests that fail with the old implementation. --- .../FlowSensitive/ControlFlowContext.cpp | 31 +++- .../TypeErasedDataflowAnalysisTest.cpp | 143 ++++++++++++++---- 2 files changed, 140 insertions(+), 34 deletions(-) diff --git a/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp index c9ebffe6f37801..8aed19544be6a2 100644 --- a/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp +++ b/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp @@ -39,8 +39,35 @@ buildStmtToBasicBlockMap(const CFG &Cfg) { StmtToBlock[Stmt->getStmt()] = Block; } - if (const Stmt *TerminatorStmt = Block->getTerminatorStmt()) - StmtToBlock[TerminatorStmt] = Block; + } + // Some terminator conditions don't appear as a `CFGElement` anywhere else - + // for example, this is true if the terminator condition is a `&&` or `||` + // operator. + // We associate these conditions with the block the terminator appears in, + // but only if the condition has not already appeared as a regular + // `CFGElement`. (The `insert()` below does nothing if the key already exists + // in the map.) + for (const CFGBlock *Block : Cfg) { + if (Block != nullptr) + if (const Stmt *TerminatorCond = Block->getTerminatorCondition()) + StmtToBlock.insert({TerminatorCond, Block}); + } + // Terminator statements typically don't appear as a `CFGElement` anywhere + // else, so we want to associate them with the block that they terminate. 
+ // However, there are some important special cases: + // - The conditional operator is a type of terminator, but it also appears + // as a regular `CFGElement`, and we want to associate it with the block + // in which it appears as a `CFGElement`. + // - The `&&` and `||` operators are types of terminators, but like the + // conditional operator, they can appear as a regular `CFGElement` or + // as a terminator condition (see above). + // We process terminators last to make sure that we only associate them with + // the block they terminate if they haven't previously occurred as a regular + // `CFGElement` or as a terminator condition. + for (const CFGBlock *Block : Cfg) { + if (Block != nullptr) + if (const Stmt *TerminatorStmt = Block->getTerminatorStmt()) + StmtToBlock.insert({TerminatorStmt, Block}); } return StmtToBlock; } diff --git a/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp b/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp index 3bca9cced8d6f7..34f9b0b23719fe 100644 --- a/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp +++ b/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp @@ -77,17 +77,33 @@ class DataflowAnalysisTest : public Test { return runDataflowAnalysis(*CFCtx, Analysis, Env); } + /// Returns the `CFGBlock` containing `S` (and asserts that it exists). + const CFGBlock *blockForStmt(const Stmt &S) { + const CFGBlock *Block = CFCtx->getStmtToBlock().lookup(&S); + assert(Block != nullptr); + return Block; + } + template const StateT & blockStateForStmt(const std::vector> &BlockStates, - const Stmt *S) { - const CFGBlock *Block = CFCtx->getStmtToBlock().lookup(S); - assert(Block != nullptr); - const std::optional &MaybeState = BlockStates[Block->getBlockID()]; + const Stmt &S) { + const std::optional &MaybeState = + BlockStates[blockForStmt(S)->getBlockID()]; assert(MaybeState.has_value()); return *MaybeState; } + /// Returns the first node that matches `Matcher` (and asserts that the match + /// was successful, i.e. the returned node is not null). + template + const NodeT &matchNode(MatcherT Matcher) { + const auto *Node = selectFirst( + "node", match(Matcher.bind("node"), AST->getASTContext())); + assert(Node != nullptr); + return *Node; + } + std::unique_ptr AST; std::unique_ptr CFCtx; std::unique_ptr DACtx; @@ -130,6 +146,79 @@ TEST_F(DataflowAnalysisTest, DiagnoseFunctionDiagnoserCalledOnEachElement) { " (Lifetime ends)\n"))); } +// Tests for the statement-to-block map. +using StmtToBlockTest = DataflowAnalysisTest; + +TEST_F(StmtToBlockTest, ConditionalOperator) { + std::string Code = R"( + void target(bool b) { + int i = b ? 1 : 0; + } + )"; + ASSERT_THAT_ERROR(runAnalysis( + Code, [](ASTContext &C) { return NoopAnalysis(C); }) + .takeError(), + llvm::Succeeded()); + + const auto &IDecl = matchNode(declStmt(has(varDecl(hasName("i"))))); + const auto &ConditionalOp = + matchNode(conditionalOperator()); + + // The conditional operator should be associated with the same block as the + // `DeclStmt` for `i`. (Specifically, the conditional operator should not be + // associated with the block for which it is the terminator.) 
+ EXPECT_EQ(blockForStmt(IDecl), blockForStmt(ConditionalOp)); +} + +TEST_F(StmtToBlockTest, LogicalAnd) { + std::string Code = R"( + void target(bool b1, bool b2) { + bool b = b1 && b2; + } + )"; + ASSERT_THAT_ERROR(runAnalysis( + Code, [](ASTContext &C) { return NoopAnalysis(C); }) + .takeError(), + llvm::Succeeded()); + + const auto &BDecl = matchNode(declStmt(has(varDecl(hasName("b"))))); + const auto &AndOp = + matchNode(binaryOperator(hasOperatorName("&&"))); + + // The `&&` operator should be associated with the same block as the + // `DeclStmt` for `b`. (Specifically, the `&&` operator should not be + // associated with the block for which it is the terminator.) + EXPECT_EQ(blockForStmt(BDecl), blockForStmt(AndOp)); +} + +TEST_F(StmtToBlockTest, IfStatementWithLogicalAnd) { + std::string Code = R"( + void target(bool b1, bool b2) { + if (b1 && b2) + ; + } + )"; + ASSERT_THAT_ERROR(runAnalysis( + Code, [](ASTContext &C) { return NoopAnalysis(C); }) + .takeError(), + llvm::Succeeded()); + + const auto &If = matchNode(ifStmt()); + const auto &B2 = + matchNode(declRefExpr(to(varDecl(hasName("b2"))))); + const auto &AndOp = + matchNode(binaryOperator(hasOperatorName("&&"))); + + // The if statement is the terminator for the block that contains both `b2` + // and the `&&` operator (which appears only as a terminator condition, not + // as a regular `CFGElement`). + const CFGBlock *IfBlock = blockForStmt(If); + const CFGBlock *B2Block = blockForStmt(B2); + const CFGBlock *AndOpBlock = blockForStmt(AndOp); + EXPECT_EQ(IfBlock, B2Block); + EXPECT_EQ(IfBlock, AndOpBlock); +} + // Tests that check we discard state for expressions correctly. using DiscardExprStateTest = DataflowAnalysisTest; @@ -144,25 +233,20 @@ TEST_F(DiscardExprStateTest, WhileStatement) { auto BlockStates = llvm::cantFail(runAnalysis( Code, [](ASTContext &C) { return NoopAnalysis(C); })); - auto *NotEqOp = selectFirst( - "op", match(binaryOperator(hasOperatorName("!=")).bind("op"), - AST->getASTContext())); - ASSERT_NE(NotEqOp, nullptr); - - auto *CallFoo = selectFirst( - "call", match(callExpr(callee(functionDecl(hasName("foo")))).bind("call"), - AST->getASTContext())); - ASSERT_NE(CallFoo, nullptr); + const auto &NotEqOp = + matchNode(binaryOperator(hasOperatorName("!="))); + const auto &CallFoo = + matchNode(callExpr(callee(functionDecl(hasName("foo"))))); // In the block that evaluates the expression `p != nullptr`, this expression // is associated with a value. const auto &NotEqOpState = blockStateForStmt(BlockStates, NotEqOp); - EXPECT_NE(NotEqOpState.Env.getValue(*NotEqOp), nullptr); + EXPECT_NE(NotEqOpState.Env.getValue(NotEqOp), nullptr); // In the block that calls `foo(p)`, the value for `p != nullptr` is discarded // because it is not consumed by this block. 
const auto &CallFooState = blockStateForStmt(BlockStates, CallFoo); - EXPECT_EQ(CallFooState.Env.getValue(*NotEqOp), nullptr); + EXPECT_EQ(CallFooState.Env.getValue(NotEqOp), nullptr); } TEST_F(DiscardExprStateTest, BooleanOperator) { @@ -174,29 +258,24 @@ TEST_F(DiscardExprStateTest, BooleanOperator) { auto BlockStates = llvm::cantFail(runAnalysis( Code, [](ASTContext &C) { return NoopAnalysis(C); })); - auto *AndOp = selectFirst( - "op", match(binaryOperator(hasOperatorName("&&")).bind("op"), - AST->getASTContext())); - ASSERT_NE(AndOp, nullptr); - - auto *Return = selectFirst( - "return", match(returnStmt().bind("return"), AST->getASTContext())); - ASSERT_NE(Return, nullptr); + const auto &AndOp = + matchNode(binaryOperator(hasOperatorName("&&"))); + const auto &Return = matchNode(returnStmt()); // In the block that evaluates the LHS of the `&&` operator, the LHS is // associated with a value, while the right-hand side is not (unsurprisingly, // as it hasn't been evaluated yet). - const auto &LHSState = blockStateForStmt(BlockStates, AndOp->getLHS()); - auto *LHSValue = cast(LHSState.Env.getValue(*AndOp->getLHS())); + const auto &LHSState = blockStateForStmt(BlockStates, *AndOp.getLHS()); + auto *LHSValue = cast(LHSState.Env.getValue(*AndOp.getLHS())); ASSERT_NE(LHSValue, nullptr); - EXPECT_EQ(LHSState.Env.getValue(*AndOp->getRHS()), nullptr); + EXPECT_EQ(LHSState.Env.getValue(*AndOp.getRHS()), nullptr); // In the block that evaluates the RHS, the RHS is associated with a // value. The value for the LHS has been discarded as it is not consumed by // this block. - const auto &RHSState = blockStateForStmt(BlockStates, AndOp->getRHS()); - EXPECT_EQ(RHSState.Env.getValue(*AndOp->getLHS()), nullptr); - auto *RHSValue = cast(RHSState.Env.getValue(*AndOp->getRHS())); + const auto &RHSState = blockStateForStmt(BlockStates, *AndOp.getRHS()); + EXPECT_EQ(RHSState.Env.getValue(*AndOp.getLHS()), nullptr); + auto *RHSValue = cast(RHSState.Env.getValue(*AndOp.getRHS())); ASSERT_NE(RHSValue, nullptr); // In the block that evaluates the return statement, the expression `b1 && b2` @@ -217,9 +296,9 @@ TEST_F(DiscardExprStateTest, BooleanOperator) { // operands, rather than from the environment for the block that contains the // `&&`. 
const auto &ReturnState = blockStateForStmt(BlockStates, Return); - EXPECT_EQ(ReturnState.Env.getValue(*AndOp->getLHS()), nullptr); - EXPECT_EQ(ReturnState.Env.getValue(*AndOp->getRHS()), nullptr); - EXPECT_EQ(ReturnState.Env.getValue(*AndOp), + EXPECT_EQ(ReturnState.Env.getValue(*AndOp.getLHS()), nullptr); + EXPECT_EQ(ReturnState.Env.getValue(*AndOp.getRHS()), nullptr); + EXPECT_EQ(ReturnState.Env.getValue(AndOp), &ReturnState.Env.makeAnd(*LHSValue, *RHSValue)); } From 8bd327d6fed5a4ae99bdbd039f5503700030cf53 Mon Sep 17 00:00:00 2001 From: Nick Anderson Date: Thu, 22 Feb 2024 00:47:36 -0800 Subject: [PATCH 04/19] [AMDGPU][GlobalISel] Add fdiv / sqrt to rsq combine (#78673) Fixes #64743 --- llvm/lib/Target/AMDGPU/AMDGPUCombine.td | 8 +- .../AMDGPU/AMDGPUPostLegalizerCombiner.cpp | 23 + .../GlobalISel/combine-fdiv-sqrt-to-rsq.mir | 584 ++++++++++++++++++ 3 files changed, 614 insertions(+), 1 deletion(-) create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td index b9411e2052120d..9218760538dc5d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td +++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td @@ -33,6 +33,12 @@ def rcp_sqrt_to_rsq : GICombineRule< [{ return matchRcpSqrtToRsq(*${rcp}, ${matchinfo}); }]), (apply [{ Helper.applyBuildFn(*${rcp}, ${matchinfo}); }])>; +def fdiv_by_sqrt_to_rsq_f16 : GICombineRule< + (defs root:$root), + (match (G_FSQRT f16:$sqrt, $x, (MIFlags FmContract)), + (G_FDIV f16:$dst, $y, $sqrt, (MIFlags FmContract)):$root, + [{ return matchFDivSqrtToRsqF16(*${root}); }]), + (apply [{ applyFDivSqrtToRsqF16(*${root}, ${x}.getReg()); }])>; def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">; @@ -156,7 +162,7 @@ def AMDGPUPostLegalizerCombiner: GICombiner< "AMDGPUPostLegalizerCombinerImpl", [all_combines, gfx6gfx7_combines, gfx8_combines, uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg, - rcp_sqrt_to_rsq, sign_extension_in_reg, smulu64]> { + rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> { let CombineAllMethodName = "tryCombineAllImpl"; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp index a1c34e92a57f35..82e17ddad851fd 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp @@ -83,6 +83,9 @@ class AMDGPUPostLegalizerCombinerImpl : public Combiner { matchRcpSqrtToRsq(MachineInstr &MI, std::function &MatchInfo) const; + bool matchFDivSqrtToRsqF16(MachineInstr &MI) const; + void applyFDivSqrtToRsqF16(MachineInstr &MI, const Register &X) const; + // FIXME: Should be able to have 2 separate matchdatas rather than custom // struct boilerplate. 
struct CvtF32UByteMatchInfo { @@ -334,6 +337,26 @@ bool AMDGPUPostLegalizerCombinerImpl::matchRcpSqrtToRsq( return false; } +bool AMDGPUPostLegalizerCombinerImpl::matchFDivSqrtToRsqF16( + MachineInstr &MI) const { + Register Sqrt = MI.getOperand(2).getReg(); + return MRI.hasOneNonDBGUse(Sqrt); +} + +void AMDGPUPostLegalizerCombinerImpl::applyFDivSqrtToRsqF16( + MachineInstr &MI, const Register &X) const { + Register Dst = MI.getOperand(0).getReg(); + Register Y = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(Dst); + uint32_t Flags = MI.getFlags(); + Register RSQ = B.buildIntrinsic(Intrinsic::amdgcn_rsq, {DstTy}) + .addUse(X) + .setMIFlags(Flags) + .getReg(0); + B.buildFMul(Dst, RSQ, Y, Flags); + MI.eraseFromParent(); +} + bool AMDGPUPostLegalizerCombinerImpl::matchCvtF32UByteN( MachineInstr &MI, CvtF32UByteMatchInfo &MatchInfo) const { Register SrcReg = MI.getOperand(1).getReg(); diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir new file mode 100644 index 00000000000000..6c5339e36c77f4 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir @@ -0,0 +1,584 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s + +--- +name: rsq_f16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f16 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16) + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT [[INT]](s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %one:_(s16) = G_FCONSTANT half 1.0 + %rsq:_(s16) = contract G_FDIV %one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... + +--- +name: rsq_f16_missing_contract0 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f16_missing_contract0 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = G_FSQRT %x + ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00 + ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = G_FSQRT %x + %one:_(s16) = G_FCONSTANT half 1.0 + %rsq:_(s16) = contract G_FDIV %one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... 
+ +--- +name: rsq_f16_missing_contract1 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f16_missing_contract1 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x + ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00 + ; GCN-NEXT: %rsq:_(s16) = G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %one:_(s16) = G_FCONSTANT half 1.0 + %rsq:_(s16) = G_FDIV %one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... + +--- +name: neg_rsq_f16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_f16 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16) + ; GCN-NEXT: %rsq:_(s16) = contract G_FNEG [[INT]] + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %neg_one:_(s16) = G_FCONSTANT half -1.0 + %rsq:_(s16) = contract G_FDIV %neg_one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... + +--- +name: neg_rsq_f16_missing_contract0 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_f16_missing_contract0 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = G_FSQRT %x + ; GCN-NEXT: %neg_one:_(s16) = G_FCONSTANT half 0xHBC00 + ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %neg_one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = G_FSQRT %x + %neg_one:_(s16) = G_FCONSTANT half -1.0 + %rsq:_(s16) = contract G_FDIV %neg_one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... + +--- +name: neg_rsq_f16_missing_contract1 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_f16_missing_contract1 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x + ; GCN-NEXT: %neg_one:_(s16) = G_FCONSTANT half 0xHBC00 + ; GCN-NEXT: %rsq:_(s16) = G_FDIV %neg_one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %neg_one:_(s16) = G_FCONSTANT half -1.0 + %rsq:_(s16) = G_FDIV %neg_one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... 
+ +--- +name: rsq_f16_multi_use +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f16_multi_use + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x + ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00 + ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + ; GCN-NEXT: S_ENDPGM 0, implicit %sqrt(s16) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %one:_(s16) = G_FCONSTANT half 1.0 + %rsq:_(s16) = contract G_FDIV %one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + S_ENDPGM 0, implicit %sqrt + +... + +--- +name: rsq_f16_multi_use_missing_contract0 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f16_multi_use_missing_contract0 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = G_FSQRT %x + ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00 + ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + ; GCN-NEXT: S_ENDPGM 0, implicit %sqrt(s16) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = G_FSQRT %x + %one:_(s16) = G_FCONSTANT half 1.0 + %rsq:_(s16) = contract G_FDIV %one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + S_ENDPGM 0, implicit %sqrt + +... + +--- +name: rsq_f16_multi_use_missing_contract1 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f16_multi_use_missing_contract1 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x + ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00 + ; GCN-NEXT: %rsq:_(s16) = G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + ; GCN-NEXT: S_ENDPGM 0, implicit %sqrt(s16) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %one:_(s16) = G_FCONSTANT half 1.0 + %rsq:_(s16) = G_FDIV %one, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + S_ENDPGM 0, implicit %sqrt + +... + +--- +name: rsq_f32 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f32 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: %x:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %sqrt:_(s32) = contract G_FSQRT %x + ; GCN-NEXT: %one:_(s32) = G_FCONSTANT float 1.000000e+00 + ; GCN-NEXT: %rsq:_(s32) = contract G_FDIV %one, %sqrt + ; GCN-NEXT: $vgpr0 = COPY %rsq(s32) + %x:_(s32) = COPY $vgpr0 + %sqrt:_(s32) = contract G_FSQRT %x + %one:_(s32) = G_FCONSTANT float 1.0 + %rsq:_(s32) = contract G_FDIV %one, %sqrt + $vgpr0 = COPY %rsq + +... 
+ +--- +name: neg_rsq_f32 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_f32 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: %x:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %sqrt:_(s32) = contract G_FSQRT %x + ; GCN-NEXT: %neg_one:_(s32) = G_FCONSTANT float -1.000000e+00 + ; GCN-NEXT: %rsq:_(s32) = contract G_FDIV %neg_one, %sqrt + ; GCN-NEXT: $vgpr0 = COPY %rsq(s32) + %x:_(s32) = COPY $vgpr0 + %sqrt:_(s32) = contract G_FSQRT %x + %neg_one:_(s32) = G_FCONSTANT float -1.0 + %rsq:_(s32) = contract G_FDIV %neg_one, %sqrt + $vgpr0 = COPY %rsq + +... + +--- +name: afn_rsq_f32 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: afn_rsq_f32 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: %x:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %sqrt:_(s32) = contract afn G_FSQRT %x + ; GCN-NEXT: %one:_(s32) = G_FCONSTANT float 1.000000e+00 + ; GCN-NEXT: %rsq:_(s32) = contract afn G_FDIV %one, %sqrt + ; GCN-NEXT: $vgpr0 = COPY %rsq(s32) + %x:_(s32) = COPY $vgpr0 + %sqrt:_(s32) = contract afn G_FSQRT %x + %one:_(s32) = G_FCONSTANT float 1.0 + %rsq:_(s32) = contract afn G_FDIV %one, %sqrt + $vgpr0 = COPY %rsq + +... + +--- +name: afn_rsq_f32_multi_use +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: afn_rsq_f32_multi_use + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: %x:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %sqrt:_(s32) = contract afn G_FSQRT %x + ; GCN-NEXT: %one:_(s32) = G_FCONSTANT float 1.000000e+00 + ; GCN-NEXT: %rsq:_(s32) = contract afn G_FDIV %one, %sqrt + ; GCN-NEXT: %ret:_(s32) = G_FSUB %sqrt, %rsq + ; GCN-NEXT: $vgpr0 = COPY %ret(s32) + %x:_(s32) = COPY $vgpr0 + %sqrt:_(s32) = contract afn G_FSQRT %x + %one:_(s32) = G_FCONSTANT float 1.0 + %rsq:_(s32) = contract afn G_FDIV %one, %sqrt + %ret:_(s32) = G_FSUB %sqrt, %rsq + $vgpr0 = COPY %ret + +... + +--- +name: afn_neg_rsq_f32 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: afn_neg_rsq_f32 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: %x:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %sqrt:_(s32) = contract afn G_FSQRT %x + ; GCN-NEXT: %neg_one:_(s32) = G_FCONSTANT float -1.000000e+00 + ; GCN-NEXT: %rsq:_(s32) = contract afn G_FDIV %neg_one, %sqrt + ; GCN-NEXT: $vgpr0 = COPY %rsq(s32) + %x:_(s32) = COPY $vgpr0 + %sqrt:_(s32) = contract afn G_FSQRT %x + %neg_one:_(s32) = G_FCONSTANT float -1.0 + %rsq:_(s32) = contract afn G_FDIV %neg_one, %sqrt + $vgpr0 = COPY %rsq + +... + + +--- +name: rsq_f64 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_f64 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s64) = contract G_FSQRT %x + ; GCN-NEXT: %one:_(s64) = G_FCONSTANT double 1.000000e+00 + ; GCN-NEXT: %rsq:_(s64) = contract G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s64) = G_ANYEXT %0:_(s32) + %sqrt:_(s64) = contract G_FSQRT %x + %one:_(s64) = G_FCONSTANT double 1.0 + %rsq:_(s64) = contract G_FDIV %one, %sqrt + %ext:_(s32) = G_TRUNC %rsq:_(s64) + $vgpr0 = COPY %ext + +... 
+ +--- +name: neg_rsq_f64 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_f64 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s64) = contract G_FSQRT %x + ; GCN-NEXT: %neg_one:_(s64) = G_FCONSTANT double -1.000000e+00 + ; GCN-NEXT: %rsq:_(s64) = contract G_FDIV %neg_one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s64) = G_ANYEXT %0:_(s32) + %sqrt:_(s64) = contract G_FSQRT %x + %neg_one:_(s64) = G_FCONSTANT double -1.0 + %rsq:_(s64) = contract G_FDIV %neg_one, %sqrt + %ext:_(s32) = G_TRUNC %rsq:_(s64) + $vgpr0 = COPY %ext + +... + +--- +name: afn_rsq_f64 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: afn_rsq_f64 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s64) = contract afn G_FSQRT %x + ; GCN-NEXT: %one:_(s64) = G_FCONSTANT double 1.000000e+00 + ; GCN-NEXT: %rsq:_(s64) = contract afn G_FDIV %one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s64) = G_ANYEXT %0:_(s32) + %sqrt:_(s64) = contract afn G_FSQRT %x + %one:_(s64) = G_FCONSTANT double 1.0 + %rsq:_(s64) = contract afn G_FDIV %one, %sqrt + %ext:_(s32) = G_TRUNC %rsq:_(s64) + $vgpr0 = COPY %ext + +... + +--- +name: afn_neg_rsq_f64 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: afn_neg_rsq_f64 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32) + ; GCN-NEXT: %sqrt:_(s64) = contract afn G_FSQRT %x + ; GCN-NEXT: %neg_one:_(s64) = G_FCONSTANT double -1.000000e+00 + ; GCN-NEXT: %rsq:_(s64) = contract afn G_FDIV %neg_one, %sqrt + ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s64) = G_ANYEXT %0:_(s32) + %sqrt:_(s64) = contract afn G_FSQRT %x + %neg_one:_(s64) = G_FCONSTANT double -1.0 + %rsq:_(s64) = contract afn G_FDIV %neg_one, %sqrt + %ext:_(s32) = G_TRUNC %rsq:_(s64) + $vgpr0 = COPY %ext + +... + + +--- +name: rsq_fract_num_f16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_fract_num_f16 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %fract:_(s16) = G_FCONSTANT half 0xH3800 + ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16) + ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %fract + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %fract:_(s16) = G_FCONSTANT half 0.5 + %rsq:_(s16) = contract G_FDIV %fract, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... 
+ +--- +name: neg_rsq_fract_num_f16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_fract_num_f16 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %neg_fract:_(s16) = G_FCONSTANT half 0xHB800 + ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16) + ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %neg_fract + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %neg_fract:_(s16) = G_FCONSTANT half -0.5 + %rsq:_(s16) = contract G_FDIV %neg_fract, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + + +... + +--- +name: rsq_large_num_f16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: rsq_large_num_f16 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %ten:_(s16) = G_FCONSTANT half 0xH4900 + ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16) + ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %ten + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %ten:_(s16) = G_FCONSTANT half 10.0 + %rsq:_(s16) = contract G_FDIV %ten, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... + +--- +name: neg_rsq_large_num_f16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; GCN-LABEL: name: neg_rsq_large_num_f16 + ; GCN: liveins: $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32) + ; GCN-NEXT: %neg_ten:_(s16) = G_FCONSTANT half 0xHC900 + ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16) + ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %neg_ten + ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16) + ; GCN-NEXT: $vgpr0 = COPY %ext(s32) + %0:_(s32) = COPY $vgpr0 + %x:_(s16) = G_TRUNC %0:_(s32) + %sqrt:_(s16) = contract G_FSQRT %x + %neg_ten:_(s16) = G_FCONSTANT half -10.0 + %rsq:_(s16) = contract G_FDIV %neg_ten, %sqrt + %ext:_(s32) = G_ANYEXT %rsq:_(s16) + $vgpr0 = COPY %ext + +... From fde344aef20bc4280f01294ac6e14a5c2db2d572 Mon Sep 17 00:00:00 2001 From: Matthias Springer Date: Thu, 22 Feb 2024 09:55:50 +0100 Subject: [PATCH 05/19] [mlir][Transforms] Dialect conversion: Improve signature conversion API (#81997) This commit improves the block signature conversion API of the dialect conversion. There is the following comment in `ArgConverter::applySignatureConversion`: ``` // If no arguments are being changed or added, there is nothing to do. ``` However, the implementation actually used to replace a block with a new block even if the block argument types do not change (i.e., there is "nothing to do"). This is fixed in this commit. The documentation of the public `ConversionPatternRewriter` API is updated accordingly. This commit also removes a check that used to *sometimes* skip a block signature conversion if the block was already converted. This is not consistent with the public `ConversionPatternRewriter` API; blocks should always be converted, regardless of whether they were already converted or not. 
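As an illustrative aside (not part of this patch), the sketch below shows how a conversion pattern typically drives this signature conversion API, assuming the `Region`-taking `applySignatureConversion` overload documented in the header diff further down; `MyOp` and its single-block `getBody()` region are made-up placeholders, and the argument types are remapped through whatever `TypeConverter` the pattern was constructed with:

```c++
#include "mlir/Transforms/DialectConversion.h"
using namespace mlir;

// `MyOp` is a hypothetical op with a region accessor `getBody()`; it only
// serves to show where the signature conversion call sits in a pattern.
struct MyOpSignatureLowering : public OpConversionPattern<MyOp> {
  using OpConversionPattern<MyOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(MyOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Region &body = op.getBody();
    // Build the remapping from the original entry block arguments to their
    // converted types.
    TypeConverter::SignatureConversion sigConv(body.getNumArguments());
    for (unsigned i = 0, e = body.getNumArguments(); i != e; ++i) {
      Type converted =
          getTypeConverter()->convertType(body.getArgument(i).getType());
      if (!converted)
        return failure();
      sigConv.addInputs(i, converted);
    }
    // With the behavior described above, if every converted type equals the
    // original one, this call leaves the existing entry block in place and
    // returns it instead of splicing in an identical replacement block.
    rewriter.applySignatureConversion(&body, sigConv, getTypeConverter());
    return success();
  }
};
```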
Block signature conversion also used to be silently skipped when the specified block was detached. Instead of silently skipping, an assertion is triggered. Attempting to convert a detached block (which is likely an erased block) is invalid API usage. --- mlir/include/mlir/Transforms/DialectConversion.h | 12 +++++++++--- mlir/lib/Transforms/Utils/DialectConversion.cpp | 10 +++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h index 0d7722aa07ee38..2575be4cdea1ac 100644 --- a/mlir/include/mlir/Transforms/DialectConversion.h +++ b/mlir/include/mlir/Transforms/DialectConversion.h @@ -663,6 +663,8 @@ class ConversionPatternRewriter final : public PatternRewriter { /// Apply a signature conversion to the entry block of the given region. This /// replaces the entry block with a new block containing the updated /// signature. The new entry block to the region is returned for convenience. + /// If no block argument types are changing, the entry original block will be + /// left in place and returned. /// /// If provided, `converter` will be used for any materializations. Block * @@ -671,8 +673,11 @@ class ConversionPatternRewriter final : public PatternRewriter { const TypeConverter *converter = nullptr); /// Convert the types of block arguments within the given region. This - /// replaces each block with a new block containing the updated signature. The - /// entry block may have a special conversion if `entryConversion` is + /// replaces each block with a new block containing the updated signature. If + /// an updated signature would match the current signature, the respective + /// block is left in place as is. + /// + /// The entry block may have a special conversion if `entryConversion` is /// provided. On success, the new entry block to the region is returned for /// convenience. Otherwise, failure is returned. FailureOr convertRegionTypes( @@ -681,7 +686,8 @@ class ConversionPatternRewriter final : public PatternRewriter { /// Convert the types of block arguments within the given region except for /// the entry region. This replaces each non-entry block with a new block - /// containing the updated signature. + /// containing the updated signature. If an updated signature would match the + /// current signature, the respective block is left in place as is. /// /// If special conversion behavior is needed for the non-entry blocks (for /// example, we need to convert only a subset of a BB arguments), such diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp index 4989ddc3ec94fb..afdd31a748c8c4 100644 --- a/mlir/lib/Transforms/Utils/DialectConversion.cpp +++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp @@ -544,12 +544,8 @@ FailureOr ArgConverter::convertSignature( Block *block, const TypeConverter *converter, ConversionValueMapping &mapping, SmallVectorImpl &argReplacements) { - // Check if the block was already converted. - // * If the block is mapped in `conversionInfo`, it is a converted block. - // * If the block is detached, conservatively assume that it is going to be - // deleted; it is likely the old block (before it was converted). - if (conversionInfo.count(block) || !block->getParent()) - return block; + assert(block->getParent() && "cannot convert signature of detached block"); + // If a converter wasn't provided, and the block wasn't already converted, // there is nothing we can do. 
if (!converter) @@ -570,7 +566,7 @@ Block *ArgConverter::applySignatureConversion( // If no arguments are being changed or added, there is nothing to do. unsigned origArgCount = block->getNumArguments(); auto convertedTypes = signatureConversion.getConvertedTypes(); - if (origArgCount == 0 && convertedTypes.empty()) + if (llvm::equal(block->getArgumentTypes(), convertedTypes)) return block; // Split the block at the beginning to get a new block to use for the updated From 25e7e8d993f12f391ad90d23b5c3e2385ebafc81 Mon Sep 17 00:00:00 2001 From: Antonio Frighetto Date: Tue, 20 Feb 2024 22:13:46 +0100 Subject: [PATCH 06/19] [CGP] Permit tail call optimization on undefined return value We may freely allow tail call optzs on undef values as well. Fixes: https://github.com/llvm/llvm-project/issues/82387. --- llvm/lib/CodeGen/CodeGenPrepare.cpp | 5 +- llvm/test/CodeGen/AArch64/addsub.ll | 6 +- .../CodeGen/AArch64/callbr-asm-obj-file.ll | 2 +- llvm/test/CodeGen/RISCV/pr51206.ll | 12 ++-- llvm/test/CodeGen/X86/tailcall-cgp-dup.ll | 58 ++++++++++++++++++- 5 files changed, 66 insertions(+), 17 deletions(-) diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 4036f18dbc6794..feefe87f406365 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -2686,8 +2686,9 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, attributesPermitTailCall(F, CI, RetI, *TLI)) { // Either we return void or the return value must be the first // argument of a known intrinsic or library function. - if (!V || (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) && - V == CI->getArgOperand(0))) { + if (!V || isa(V) || + (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) && + V == CI->getArgOperand(0))) { TailCallBBs.push_back(Pred); } } diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll index 1b86fe6c707c8e..20215fe9146924 100644 --- a/llvm/test/CodeGen/AArch64/addsub.ll +++ b/llvm/test/CodeGen/AArch64/addsub.ll @@ -662,17 +662,13 @@ define dso_local i32 @_extract_crng_crng() { ; CHECK-NEXT: cmn x8, #1272 ; CHECK-NEXT: b.pl .LBB36_3 ; CHECK-NEXT: .LBB36_2: // %if.then -; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, primary_crng ; CHECK-NEXT: ldr w8, [x8, :lo12:primary_crng] ; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: adrp x8, input_pool ; CHECK-NEXT: add x8, x8, :lo12:input_pool ; CHECK-NEXT: csel x0, xzr, x8, eq -; CHECK-NEXT: bl crng_reseed -; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: b crng_reseed ; CHECK-NEXT: .LBB36_3: // %if.end ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll index 94041bf00218ca..e601f03d524a4a 100644 --- a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll +++ b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll @@ -40,7 +40,7 @@ declare dso_local i32 @g(...) local_unnamed_addr declare dso_local i32 @i(...) 
local_unnamed_addr ; CHECK-LABEL: : -; CHECK: bl {{.*}} +; CHECK: b {{.*}} ; CHECK-LABEL: <$d.5>: ; CHECK-LABEL: <$x.6>: ; CHECK-NEXT: b {{.*}} diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll index f54031af0de5e6..8aa145f6ac5efa 100644 --- a/llvm/test/CodeGen/RISCV/pr51206.ll +++ b/llvm/test/CodeGen/RISCV/pr51206.ll @@ -27,16 +27,12 @@ define signext i32 @wobble() nounwind { ; CHECK-NEXT: lui a2, %hi(global.3) ; CHECK-NEXT: li a3, 5 ; CHECK-NEXT: sw a1, %lo(global.3)(a2) -; CHECK-NEXT: bltu a0, a3, .LBB0_2 -; CHECK-NEXT: # %bb.1: # %bb10 -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; CHECK-NEXT: call quux -; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .LBB0_2: # %bb12 +; CHECK-NEXT: bgeu a0, a3, .LBB0_2 +; CHECK-NEXT: # %bb.1: # %bb12 ; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ret +; CHECK-NEXT: .LBB0_2: # %bb10 +; CHECK-NEXT: tail quux bb: %tmp = load i8, ptr @global, align 1 %tmp1 = zext i8 %tmp to i32 diff --git a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll index 401ed9f7bc5a9e..8a9ee60f341c2b 100644 --- a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll +++ b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll @@ -339,7 +339,7 @@ return: define ptr @strcpy_illegal_tailc(ptr %dest, i64 %sz, ptr readonly returned %src) nounwind { ; CHECK-LABEL: strcpy_illegal_tailc: -; CHECK: ## %bb.0: +; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: movq %rdx, %rbx ; CHECK-NEXT: testq %rsi, %rsi @@ -351,6 +351,7 @@ define ptr @strcpy_illegal_tailc(ptr %dest, i64 %sz, ptr readonly returned %src) ; CHECK-NEXT: movq %rbx, %rax ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: retq +entry: %cmp = icmp eq i64 %sz, 0 br i1 %cmp, label %return, label %if.then @@ -362,8 +363,63 @@ return: ret ptr %src } +@i = global i32 0, align 4 + +define i32 @undef_tailc() nounwind { +; CHECK-LABEL: undef_tailc: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: cmpl $0, _i(%rip) +; CHECK-NEXT: jne _qux ## TAILCALL +; CHECK-NEXT: ## %bb.1: ## %return +; CHECK-NEXT: retq +entry: + %val = load i32, ptr @i, align 4 + %cmp = icmp eq i32 %val, 0 + br i1 %cmp, label %return, label %if.then + +if.then: + %rv_unused = tail call i32 @qux() + br label %return + +return: + ret i32 undef +} + +define i32 @undef_and_known_tailc() nounwind { +; CHECK-LABEL: undef_and_known_tailc: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: movl _i(%rip), %eax +; CHECK-NEXT: cmpl $5, %eax +; CHECK-NEXT: je _qux ## TAILCALL +; CHECK-NEXT: ## %bb.1: ## %entry +; CHECK-NEXT: cmpl $2, %eax +; CHECK-NEXT: je _quux ## TAILCALL +; CHECK-NEXT: ## %bb.2: ## %return +; CHECK-NEXT: retq +entry: + %val = load i32, ptr @i, align 4 + switch i32 %val, label %return [ + i32 2, label %case_2 + i32 5, label %case_5 + ] + +case_2: + %rv_unused = tail call i32 @quux() + br label %return + +case_5: + %rv = tail call i32 @qux() + br label %return + +return: + %phi = phi i32 [ undef, %case_2 ], [ %rv, %case_5 ], [ undef, %entry ] + ret i32 %phi +} + declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1) declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) declare noalias ptr @malloc(i64) declare ptr @strcpy(ptr noalias returned writeonly, ptr noalias nocapture readonly) declare ptr @baz(ptr, ptr) +declare i32 @qux() +declare i32 @quux() From c5253aa136ac6ba683b367b2bae0dde1a543d1df Mon Sep 17 00:00:00 2001 From: CarolineConcatto Date: Thu, 22 Feb 2024 09:19:48 
+0000 Subject: [PATCH 07/19] [AArch64] Restore Z-registers before P-registers (#79623) (#82492) This is needed by PR#77665[1] that uses a P-register while restoring Z-registers. The reverse for SVE register restore in the epilogue was added to guarantee performance, but further work was done to improve sve frame restore and besides that the schedule also may change the order of the restore, undoing the reverse restore. This also fix the problem reported in (PR #79623) on Windows with std::reverse and .base(). [1]https://github.com/llvm/llvm-project/pull/77665 --- .../Target/AArch64/AArch64FrameLowering.cpp | 19 ++-- .../framelayout-sve-calleesaves-fix.mir | 2 +- llvm/test/CodeGen/AArch64/framelayout-sve.mir | 24 ++--- .../sme-streaming-compatible-interface.ll | 32 +++---- .../AArch64/sme-streaming-interface.ll | 32 +++---- .../CodeGen/AArch64/sme2-intrinsics-ld1.ll | 32 +++---- .../CodeGen/AArch64/sme2-intrinsics-ldnt1.ll | 32 +++---- .../test/CodeGen/AArch64/stack-probing-sve.ll | 4 +- llvm/test/CodeGen/AArch64/sve-alloca.ll | 16 ++-- .../AArch64/sve-calling-convention-mixed.ll | 32 +++---- llvm/test/CodeGen/AArch64/sve-tailcall.ll | 32 +++---- llvm/test/CodeGen/AArch64/unwind-preserved.ll | 96 +++++++++---------- 12 files changed, 177 insertions(+), 176 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 3485edb69c910c..503b1c199650ff 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -3195,11 +3195,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters( return MIB->getIterator(); }; - // SVE objects are always restored in reverse order. - for (const RegPairInfo &RPI : reverse(RegPairs)) - if (RPI.isScalable()) - EmitMI(RPI); - if (homogeneousPrologEpilog(MF, &MBB)) { auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog)) .setMIFlag(MachineInstr::FrameDestroy); @@ -3210,11 +3205,19 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters( return true; } + // For performance reasons restore SVE register in increasing order + auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; }; + auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR); + auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR); + std::reverse(PPRBegin, PPREnd); + auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; }; + auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR); + auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR); + std::reverse(ZPRBegin, ZPREnd); + if (ReverseCSRRestoreSeq) { MachineBasicBlock::iterator First = MBB.end(); for (const RegPairInfo &RPI : reverse(RegPairs)) { - if (RPI.isScalable()) - continue; MachineBasicBlock::iterator It = EmitMI(RPI); if (First == MBB.end()) First = It; @@ -3223,8 +3226,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters( MBB.splice(MBBI, &MBB, First); } else { for (const RegPairInfo &RPI : RegPairs) { - if (RPI.isScalable()) - continue; (void)EmitMI(RPI); } } diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir index 3dba21d59b4087..aed31450736191 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir @@ -19,8 +19,8 @@ ; CHECK-NEXT: // implicit-def: $p4 ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 
0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG - ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload + ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: .cfi_restore z8 diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir index 213d7919e4a727..f7920e595e44ba 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir @@ -772,9 +772,9 @@ body: | # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 +# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 # CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 -# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 +# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8 @@ -873,14 +873,14 @@ body: | # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK: $p15 = frame-destroy LDR_PXI $sp, 4 -# CHECK: $p14 = frame-destroy LDR_PXI $sp, 5 -# CHECK: $p5 = frame-destroy LDR_PXI $sp, 14 -# CHECK: $p4 = frame-destroy LDR_PXI $sp, 15 # CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2 # CHECK: $z22 = frame-destroy LDR_ZXI $sp, 3 # CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16 # CHECK: $z8 = frame-destroy LDR_ZXI $sp, 17 +# CHECK: $p15 = frame-destroy LDR_PXI $sp, 4 +# CHECK: $p14 = frame-destroy LDR_PXI $sp, 5 +# CHECK: $p5 = frame-destroy LDR_PXI $sp, 14 +# CHECK: $p4 = frame-destroy LDR_PXI $sp, 15 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 18 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8 @@ -1037,14 +1037,14 @@ body: | # CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]] # CHECK: $sp = frame-destroy ADDVL_XXI $fp, -18 +# CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2 +# CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3 +# CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16 +# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4 # CHECK-NEXT: $p14 = frame-destroy LDR_PXI $sp, 5 # CHECK: $p5 = frame-destroy LDR_PXI $sp, 14 # CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 15 -# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 2 -# CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3 -# CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16 -# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10 @@ -1198,10 +1198,10 @@ body: | # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 7 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6 -# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7 # CHECK-NEXT: $z23 = frame-destroy 
LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 +# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6 +# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8 diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll index 296f2be9cfee5e..6d2abf7e18419a 100644 --- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll +++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll @@ -226,30 +226,30 @@ define @streaming_compatible_with_scalable_vectors( @streaming_compatible_with_predicate_vectors( @smstart_clobber_sve( %x) nounwind { ; CHECK-NEXT: smstop sm ; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload @@ -267,30 +267,30 @@ define @smstart_clobber_sve_duplicate( %x) ; CHECK-NEXT: smstop sm ; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte 
Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll index b7119fc0825673..ea7808d73093e6 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll @@ -129,7 +129,6 @@ define @ld1_x2_i8_z0_z8( %unused, @ld1_x2_i8_z0_z8( %unused, @ld1_x2_i8_z0_z8_scalar( %unused, @ld1_x2_i8_z0_z8_scalar( %unused, @ld1_x2_i16_z0_z8( %unused, @ld1_x2_i16_z0_z8( %unused, @ld1_x2_i16_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -611,6 +610,7 @@ define @ld1_x2_i16_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -751,7 +751,6 @@ define @ld1_x2_i32_z0_z8( %unused, @ld1_x2_i32_z0_z8( %unused, 
@ld1_x2_i32_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -922,6 +921,7 @@ define @ld1_x2_i32_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1062,7 +1062,6 @@ define @ld1_x2_i64_z0_z8( %unused, @ld1_x2_i64_z0_z8( %unused, @ld1_x2_i64_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1233,6 +1232,7 @@ define @ld1_x2_i64_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1380,7 +1380,6 @@ define @ld1_x4_i8_z0_z4_z8_z12( %unused, @ld1_x4_i8_z0_z4_z8_z12( %unused, @ld1_x4_i8_z0_z4_z8_z12_scalar( %unu ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1560,6 +1559,7 @@ define @ld1_x4_i8_z0_z4_z8_z12_scalar( %unu ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1711,7 +1711,6 @@ define @ld1_x4_i16_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1726,6 +1725,7 @@ define @ld1_x4_i16_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded 
Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1877,7 +1877,6 @@ define @ld1_x4_i16_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1892,6 +1891,7 @@ define @ld1_x4_i16_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -2043,7 +2043,6 @@ define @ld1_x4_i32_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -2058,6 +2057,7 @@ define @ld1_x4_i32_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -2209,7 +2209,6 @@ define @ld1_x4_i32_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -2224,6 +2223,7 @@ define @ld1_x4_i32_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -2375,7 +2375,6 @@ define @ld1_x4_i64_z0_z4_z8_z12( %unused, < ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -2390,6 
+2389,7 @@ define @ld1_x4_i64_z0_z4_z8_z12( %unused, < ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -2541,7 +2541,6 @@ define @ld1_x4_i64_z0_z4_z8_z12_scalar( %un ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -2556,6 +2555,7 @@ define @ld1_x4_i64_z0_z4_z8_z12_scalar( %un ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll index 1fb251a4f628e9..7e2d28fbf79828 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll @@ -82,7 +82,6 @@ define @ldnt1_x2_i8_z0_z8( %unused, @ldnt1_x2_i8_z0_z8( %unused, @ldnt1_x2_i8_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -206,6 +205,7 @@ define @ldnt1_x2_i8_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -299,7 +299,6 @@ define @ldnt1_x2_i16_z0_z8( %unused, @ldnt1_x2_i16_z0_z8( %unused, @ldnt1_x2_i16_z0_z8_scalar( %unused ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -423,6 +422,7 @@ define @ldnt1_x2_i16_z0_z8_scalar( %unused ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; 
CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -516,7 +516,6 @@ define @ldnt1_x2_i32_z0_z8( %unused, @ldnt1_x2_i32_z0_z8( %unused, @ldnt1_x2_i32_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -640,6 +639,7 @@ define @ldnt1_x2_i32_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -733,7 +733,6 @@ define @ldnt1_x2_i64_z0_z8( %unused, @ldnt1_x2_i64_z0_z8( %unused, @ldnt1_x2_i64_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -857,6 +856,7 @@ define @ldnt1_x2_i64_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -955,7 +955,6 @@ define @ldnt1_x4_i8_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -970,6 +969,7 @@ define @ldnt1_x4_i8_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1071,7 +1071,6 @@ define @ldnt1_x4_i8_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1086,6 +1085,7 @@ define @ldnt1_x4_i8_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, 
mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1188,7 +1188,6 @@ define @ldnt1_x4_i16_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1203,6 +1202,7 @@ define @ldnt1_x4_i16_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1304,7 +1304,6 @@ define @ldnt1_x4_i16_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1319,6 +1318,7 @@ define @ldnt1_x4_i16_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1421,7 +1421,6 @@ define @ldnt1_x4_i32_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1436,6 +1435,7 @@ define @ldnt1_x4_i32_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1537,7 +1537,6 @@ define @ldnt1_x4_i32_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte 
Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1552,6 +1551,7 @@ define @ldnt1_x4_i32_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1654,7 +1654,6 @@ define @ldnt1_x4_i64_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1669,6 +1668,7 @@ define @ldnt1_x4_i64_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret @@ -1770,7 +1770,6 @@ define @ldnt1_x4_i64_z0_z4_z8_z12_scalar( % ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1785,6 +1784,7 @@ define @ldnt1_x4_i64_z0_z4_z8_z12_scalar( % ; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CONTIGUOUS-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll index 1ad78709d5012d..56d865ef83e6bc 100644 --- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll +++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll @@ -380,7 +380,6 @@ define void @sve_16v_1p_csr( %a) #0 { ; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP -; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload @@ -397,6 +396,7 @@ define void @sve_16v_1p_csr( %a) #0 { ; CHECK-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #17 ; CHECK-NEXT: .cfi_def_cfa 
wsp, 16 ; CHECK-NEXT: .cfi_restore z8 @@ -697,10 +697,10 @@ define void @sve_unprobed_area( %a, i32 %n) #0 { ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG -; CHECK-NEXT: ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: .cfi_restore z8 diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll index 47e49b84aaaffb..d227538043fceb 100644 --- a/llvm/test/CodeGen/AArch64/sve-alloca.ll +++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll @@ -66,30 +66,30 @@ define void @foo( %dst, i1 %cond) { ; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: bl bar ; CHECK-NEXT: addvl sp, x29, #-18 -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll index 9851583b950eba..3965af6a9066d6 100644 --- 
a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll +++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll @@ -567,30 +567,30 @@ define @sve_caller_non_sve_callee_high_range( @sve_ret_caller_non_sve_callee_high_range() { ; CHECK-NEXT: fmov s7, #7.00000000 ; CHECK-NEXT: bl non_sve_callee_high_range ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/sve-tailcall.ll b/llvm/test/CodeGen/AArch64/sve-tailcall.ll index f32c80d392b633..4ddf007768fd2c 100644 --- a/llvm/test/CodeGen/AArch64/sve-tailcall.ll +++ b/llvm/test/CodeGen/AArch64/sve-tailcall.ll @@ -83,30 +83,30 @@ define i32 @sve_caller_non_sve_callee( %arg) nounwind { ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: bl non_sve_callee -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr 
z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload @@ -158,30 +158,30 @@ define i32 @sve_caller_non_sve_callee_fastcc( %arg) nounwind { ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: bl non_sve_callee -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, 
[sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll index f3c4d217e6fcaa..822be14faaeb1f 100644 --- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll +++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll @@ -63,18 +63,6 @@ define @invoke_callee_may_throw_sve( %v) uw ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -91,6 +79,18 @@ define @invoke_callee_may_throw_sve( %v) uw ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: .cfi_restore z8 @@ -112,18 +112,6 @@ define @invoke_callee_may_throw_sve( %v) uw ; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 
16 + 144 * VG -; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -140,6 +128,18 @@ define @invoke_callee_may_throw_sve( %v) uw ; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: .cfi_restore z8 @@ -215,18 +215,6 @@ define @invoke_callee_may_throw_sve( %v) uw ; GISEL-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 ; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG -; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -243,6 +231,18 @@ define @invoke_callee_may_throw_sve( %v) uw ; GISEL-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z9, 
[sp, #16, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #18 ; GISEL-NEXT: .cfi_def_cfa wsp, 16 ; GISEL-NEXT: .cfi_restore z8 @@ -264,18 +264,6 @@ define @invoke_callee_may_throw_sve( %v) uw ; GISEL-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 ; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG -; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -292,6 +280,18 @@ define @invoke_callee_may_throw_sve( %v) uw ; GISEL-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #18 ; GISEL-NEXT: .cfi_def_cfa wsp, 16 ; GISEL-NEXT: .cfi_restore z8 From 55558cd05c998f1b287b0af97aa6db0db0bdfaa0 Mon Sep 17 00:00:00 2001 From: Matthias Springer Date: Thu, 22 Feb 2024 10:22:27 +0100 Subject: [PATCH 08/19] 
[mlir][Transforms][NFC] Turn block type conversion into `IRRewrite` (#81756) This commit is a refactoring of the dialect conversion. The dialect conversion maintains a list of "IR rewrites" that can be committed (upon success) or rolled back (upon failure). Until now, the signature conversion of a block was only a "partial" IR rewrite. Rollbacks were triggered via `BlockTypeConversionRewrite::rollback`, but there was no `BlockTypeConversionRewrite::commit` equivalent. Overview of changes: * Remove `ArgConverter`, an internal helper class that kept track of all block type conversions. There is now a separate `BlockTypeConversionRewrite` for each block type conversion. * No more special handling for block type conversions. They are now normal "IR rewrites", just like "block creation" or "block movement". In particular, trigger "commits" of block type conversion via `BlockTypeConversionRewrite::commit`. * Remove `ArgConverter::notifyOpRemoved`. This function was used to inform the `ArgConverter` that an operation was erased, to prevent a double-free of operations in certain situations. It would be impractical to add a `notifyOpRemoved` API to `IRRewrite`. Instead, erasing ops/blocks should go through a new `SingleEraseRewriter` (that is owned by the `ConversionPatternRewriterImpl`) if there is a chance of double-free. This rewriter ignores `eraseOp`/`eraseBlock` if the op/block was already freed. --- .../Transforms/Utils/DialectConversion.cpp | 794 ++++++++---------- 1 file changed, 364 insertions(+), 430 deletions(-) diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp index afdd31a748c8c4..db41b9f19e7e8d 100644 --- a/mlir/lib/Transforms/Utils/DialectConversion.cpp +++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp @@ -154,12 +154,13 @@ namespace { struct RewriterState { RewriterState(unsigned numCreatedOps, unsigned numUnresolvedMaterializations, unsigned numReplacements, unsigned numArgReplacements, - unsigned numRewrites, unsigned numIgnoredOperations) + unsigned numRewrites, unsigned numIgnoredOperations, + unsigned numErased) : numCreatedOps(numCreatedOps), numUnresolvedMaterializations(numUnresolvedMaterializations), numReplacements(numReplacements), numArgReplacements(numArgReplacements), numRewrites(numRewrites), - numIgnoredOperations(numIgnoredOperations) {} + numIgnoredOperations(numIgnoredOperations), numErased(numErased) {} /// The current number of created operations. unsigned numCreatedOps; @@ -178,6 +179,9 @@ struct RewriterState { /// The current number of ignored operations. unsigned numIgnoredOperations; + + /// The current number of erased operations/blocks. + unsigned numErased; }; //===----------------------------------------------------------------------===// @@ -292,370 +296,6 @@ static Value buildUnresolvedTargetMaterialization( outputType, outputType, converter, unresolvedMaterializations); } -//===----------------------------------------------------------------------===// -// ArgConverter -//===----------------------------------------------------------------------===// namespace { -/// This class provides a simple interface for converting the types of block -/// arguments. This is done by creating a new block that contains the new legal -/// types and extracting the block that contains the old illegal types to allow -/// for undoing pending rewrites in the case of failure.
-struct ArgConverter { - ArgConverter( - PatternRewriter &rewriter, - SmallVectorImpl &unresolvedMaterializations) - : rewriter(rewriter), - unresolvedMaterializations(unresolvedMaterializations) {} - - /// This structure contains the information pertaining to an argument that has - /// been converted. - struct ConvertedArgInfo { - ConvertedArgInfo(unsigned newArgIdx, unsigned newArgSize, - Value castValue = nullptr) - : newArgIdx(newArgIdx), newArgSize(newArgSize), castValue(castValue) {} - - /// The start index of in the new argument list that contains arguments that - /// replace the original. - unsigned newArgIdx; - - /// The number of arguments that replaced the original argument. - unsigned newArgSize; - - /// The cast value that was created to cast from the new arguments to the - /// old. This only used if 'newArgSize' > 1. - Value castValue; - }; - - /// This structure contains information pertaining to a block that has had its - /// signature converted. - struct ConvertedBlockInfo { - ConvertedBlockInfo(Block *origBlock, const TypeConverter *converter) - : origBlock(origBlock), converter(converter) {} - - /// The original block that was requested to have its signature converted. - Block *origBlock; - - /// The conversion information for each of the arguments. The information is - /// std::nullopt if the argument was dropped during conversion. - SmallVector, 1> argInfo; - - /// The type converter used to convert the arguments. - const TypeConverter *converter; - }; - - //===--------------------------------------------------------------------===// - // Rewrite Application - //===--------------------------------------------------------------------===// - - /// Erase any rewrites registered for the blocks within the given operation - /// which is about to be removed. This merely drops the rewrites without - /// undoing them. - void notifyOpRemoved(Operation *op); - - /// Cleanup and undo any generated conversions for the arguments of block. - /// This method replaces the new block with the original, reverting the IR to - /// its original state. - void discardRewrites(Block *block); - - /// Fully replace uses of the old arguments with the new. - void applyRewrites(ConversionValueMapping &mapping); - - /// Materialize any necessary conversions for converted arguments that have - /// live users, using the provided `findLiveUser` to search for a user that - /// survives the conversion process. - LogicalResult - materializeLiveConversions(ConversionValueMapping &mapping, - OpBuilder &builder, - function_ref findLiveUser); - - //===--------------------------------------------------------------------===// - // Conversion - //===--------------------------------------------------------------------===// - - /// Attempt to convert the signature of the given block, if successful a new - /// block is returned containing the new arguments. Returns `block` if it did - /// not require conversion. - FailureOr - convertSignature(Block *block, const TypeConverter *converter, - ConversionValueMapping &mapping, - SmallVectorImpl &argReplacements); - - /// Apply the given signature conversion on the given block. The new block - /// containing the updated signature is returned. If no conversions were - /// necessary, e.g. if the block has no arguments, `block` is returned. - /// `converter` is used to generate any necessary cast operations that - /// translate between the origin argument types and those specified in the - /// signature conversion. 
- Block *applySignatureConversion( - Block *block, const TypeConverter *converter, - TypeConverter::SignatureConversion &signatureConversion, - ConversionValueMapping &mapping, - SmallVectorImpl &argReplacements); - - /// A collection of blocks that have had their arguments converted. This is a - /// map from the new replacement block, back to the original block. - llvm::MapVector conversionInfo; - - /// The pattern rewriter to use when materializing conversions. - PatternRewriter &rewriter; - - /// An ordered set of unresolved materializations during conversion. - SmallVectorImpl &unresolvedMaterializations; -}; -} // namespace - -//===----------------------------------------------------------------------===// -// Rewrite Application - -void ArgConverter::notifyOpRemoved(Operation *op) { - if (conversionInfo.empty()) - return; - - for (Region ®ion : op->getRegions()) { - for (Block &block : region) { - // Drop any rewrites from within. - for (Operation &nestedOp : block) - if (nestedOp.getNumRegions()) - notifyOpRemoved(&nestedOp); - - // Check if this block was converted. - auto *it = conversionInfo.find(&block); - if (it == conversionInfo.end()) - continue; - - // Drop all uses of the original arguments and delete the original block. - Block *origBlock = it->second.origBlock; - for (BlockArgument arg : origBlock->getArguments()) - arg.dropAllUses(); - conversionInfo.erase(it); - } - } -} - -void ArgConverter::discardRewrites(Block *block) { - auto *it = conversionInfo.find(block); - if (it == conversionInfo.end()) - return; - Block *origBlock = it->second.origBlock; - - // Drop all uses of the new block arguments and replace uses of the new block. - for (int i = block->getNumArguments() - 1; i >= 0; --i) - block->getArgument(i).dropAllUses(); - block->replaceAllUsesWith(origBlock); - - // Move the operations back the original block, move the original block back - // into its original location and the delete the new block. - origBlock->getOperations().splice(origBlock->end(), block->getOperations()); - block->getParent()->getBlocks().insert(Region::iterator(block), origBlock); - block->erase(); - - conversionInfo.erase(it); -} - -void ArgConverter::applyRewrites(ConversionValueMapping &mapping) { - for (auto &info : conversionInfo) { - ConvertedBlockInfo &blockInfo = info.second; - Block *origBlock = blockInfo.origBlock; - - // Process the remapping for each of the original arguments. - for (unsigned i = 0, e = origBlock->getNumArguments(); i != e; ++i) { - std::optional &argInfo = blockInfo.argInfo[i]; - BlockArgument origArg = origBlock->getArgument(i); - - // Handle the case of a 1->0 value mapping. - if (!argInfo) { - if (Value newArg = mapping.lookupOrNull(origArg, origArg.getType())) - origArg.replaceAllUsesWith(newArg); - continue; - } - - // Otherwise this is a 1->1+ value mapping. - Value castValue = argInfo->castValue; - assert(argInfo->newArgSize >= 1 && castValue && "expected 1->1+ mapping"); - - // If the argument is still used, replace it with the generated cast. 
- if (!origArg.use_empty()) { - origArg.replaceAllUsesWith( - mapping.lookupOrDefault(castValue, origArg.getType())); - } - } - - delete origBlock; - blockInfo.origBlock = nullptr; - } -} - -LogicalResult ArgConverter::materializeLiveConversions( - ConversionValueMapping &mapping, OpBuilder &builder, - function_ref findLiveUser) { - for (auto &info : conversionInfo) { - Block *newBlock = info.first; - ConvertedBlockInfo &blockInfo = info.second; - Block *origBlock = blockInfo.origBlock; - - // Process the remapping for each of the original arguments. - for (unsigned i = 0, e = origBlock->getNumArguments(); i != e; ++i) { - // If the type of this argument changed and the argument is still live, we - // need to materialize a conversion. - BlockArgument origArg = origBlock->getArgument(i); - if (mapping.lookupOrNull(origArg, origArg.getType())) - continue; - Operation *liveUser = findLiveUser(origArg); - if (!liveUser) - continue; - - Value replacementValue = mapping.lookupOrDefault(origArg); - bool isDroppedArg = replacementValue == origArg; - if (isDroppedArg) - rewriter.setInsertionPointToStart(newBlock); - else - rewriter.setInsertionPointAfterValue(replacementValue); - Value newArg; - if (blockInfo.converter) { - newArg = blockInfo.converter->materializeSourceConversion( - rewriter, origArg.getLoc(), origArg.getType(), - isDroppedArg ? ValueRange() : ValueRange(replacementValue)); - assert((!newArg || newArg.getType() == origArg.getType()) && - "materialization hook did not provide a value of the expected " - "type"); - } - if (!newArg) { - InFlightDiagnostic diag = - emitError(origArg.getLoc()) - << "failed to materialize conversion for block argument #" << i - << " that remained live after conversion, type was " - << origArg.getType(); - if (!isDroppedArg) - diag << ", with target type " << replacementValue.getType(); - diag.attachNote(liveUser->getLoc()) - << "see existing live user here: " << *liveUser; - return failure(); - } - mapping.map(origArg, newArg); - } - } - return success(); -} - -//===----------------------------------------------------------------------===// -// Conversion - -FailureOr ArgConverter::convertSignature( - Block *block, const TypeConverter *converter, - ConversionValueMapping &mapping, - SmallVectorImpl &argReplacements) { - assert(block->getParent() && "cannot convert signature of detached block"); - - // If a converter wasn't provided, and the block wasn't already converted, - // there is nothing we can do. - if (!converter) - return failure(); - - // Try to convert the signature for the block with the provided converter. - if (auto conversion = converter->convertBlockSignature(block)) - return applySignatureConversion(block, converter, *conversion, mapping, - argReplacements); - return failure(); -} - -Block *ArgConverter::applySignatureConversion( - Block *block, const TypeConverter *converter, - TypeConverter::SignatureConversion &signatureConversion, - ConversionValueMapping &mapping, - SmallVectorImpl &argReplacements) { - // If no arguments are being changed or added, there is nothing to do. - unsigned origArgCount = block->getNumArguments(); - auto convertedTypes = signatureConversion.getConvertedTypes(); - if (llvm::equal(block->getArgumentTypes(), convertedTypes)) - return block; - - // Split the block at the beginning to get a new block to use for the updated - // signature. 
- Block *newBlock = block->splitBlock(block->begin()); - block->replaceAllUsesWith(newBlock); - // Unlink the block, but do not erase it yet, so that the change can be rolled - // back. - block->getParent()->getBlocks().remove(block); - - // Map all new arguments to the location of the argument they originate from. - SmallVector newLocs(convertedTypes.size(), - rewriter.getUnknownLoc()); - for (unsigned i = 0; i < origArgCount; ++i) { - auto inputMap = signatureConversion.getInputMapping(i); - if (!inputMap || inputMap->replacementValue) - continue; - Location origLoc = block->getArgument(i).getLoc(); - for (unsigned j = 0; j < inputMap->size; ++j) - newLocs[inputMap->inputNo + j] = origLoc; - } - - SmallVector newArgRange( - newBlock->addArguments(convertedTypes, newLocs)); - ArrayRef newArgs(newArgRange); - - // Remap each of the original arguments as determined by the signature - // conversion. - ConvertedBlockInfo info(block, converter); - info.argInfo.resize(origArgCount); - - OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPointToStart(newBlock); - for (unsigned i = 0; i != origArgCount; ++i) { - auto inputMap = signatureConversion.getInputMapping(i); - if (!inputMap) - continue; - BlockArgument origArg = block->getArgument(i); - - // If inputMap->replacementValue is not nullptr, then the argument is - // dropped and a replacement value is provided to be the remappedValue. - if (inputMap->replacementValue) { - assert(inputMap->size == 0 && - "invalid to provide a replacement value when the argument isn't " - "dropped"); - mapping.map(origArg, inputMap->replacementValue); - argReplacements.push_back(origArg); - continue; - } - - // Otherwise, this is a 1->1+ mapping. - auto replArgs = newArgs.slice(inputMap->inputNo, inputMap->size); - Value newArg; - - // If this is a 1->1 mapping and the types of new and replacement arguments - // match (i.e. it's an identity map), then the argument is mapped to its - // original type. - // FIXME: We simply pass through the replacement argument if there wasn't a - // converter, which isn't great as it allows implicit type conversions to - // appear. We should properly restructure this code to handle cases where a - // converter isn't provided and also to properly handle the case where an - // argument materialization is actually a temporary source materialization - // (e.g. in the case of 1->N). - if (replArgs.size() == 1 && - (!converter || replArgs[0].getType() == origArg.getType())) { - newArg = replArgs.front(); - } else { - Type origOutputType = origArg.getType(); - - // Legalize the argument output type. - Type outputType = origOutputType; - if (Type legalOutputType = converter->convertType(outputType)) - outputType = legalOutputType; - - newArg = buildUnresolvedArgumentMaterialization( - rewriter, origArg.getLoc(), replArgs, origOutputType, outputType, - converter, unresolvedMaterializations); - } - - mapping.map(origArg, newArg); - argReplacements.push_back(origArg); - info.argInfo[i] = - ConvertedArgInfo(inputMap->inputNo, inputMap->size, newArg); - } - - conversionInfo.insert({newBlock, std::move(info)}); - return newBlock; -} - //===----------------------------------------------------------------------===// // IR rewrites //===----------------------------------------------------------------------===// @@ -702,6 +342,12 @@ class IRRewrite { IRRewrite(Kind kind, ConversionPatternRewriterImpl &rewriterImpl) : kind(kind), rewriterImpl(rewriterImpl) {} + /// Erase the given op (unless it was already erased). 
+ void eraseOp(Operation *op); + + /// Erase the given block (unless it was already erased). + void eraseBlock(Block *block); + const Kind kind; ConversionPatternRewriterImpl &rewriterImpl; }; @@ -744,8 +390,7 @@ class CreateBlockRewrite : public BlockRewrite { auto &blockOps = block->getOperations(); while (!blockOps.empty()) blockOps.remove(blockOps.begin()); - block->dropAllDefinedValueUses(); - block->erase(); + eraseBlock(block); } }; @@ -881,8 +526,7 @@ class SplitBlockRewrite : public BlockRewrite { // Merge back the block that was split out. originalBlock->getOperations().splice(originalBlock->end(), block->getOperations()); - block->dropAllDefinedValueUses(); - block->erase(); + eraseBlock(block); } private: @@ -890,20 +534,59 @@ class SplitBlockRewrite : public BlockRewrite { Block *originalBlock; }; +/// This structure contains the information pertaining to an argument that has +/// been converted. +struct ConvertedArgInfo { + ConvertedArgInfo(unsigned newArgIdx, unsigned newArgSize, + Value castValue = nullptr) + : newArgIdx(newArgIdx), newArgSize(newArgSize), castValue(castValue) {} + + /// The start index of in the new argument list that contains arguments that + /// replace the original. + unsigned newArgIdx; + + /// The number of arguments that replaced the original argument. + unsigned newArgSize; + + /// The cast value that was created to cast from the new arguments to the + /// old. This only used if 'newArgSize' > 1. + Value castValue; +}; + /// Block type conversion. This rewrite is partially reflected in the IR. class BlockTypeConversionRewrite : public BlockRewrite { public: - BlockTypeConversionRewrite(ConversionPatternRewriterImpl &rewriterImpl, - Block *block) - : BlockRewrite(Kind::BlockTypeConversion, rewriterImpl, block) {} + BlockTypeConversionRewrite( + ConversionPatternRewriterImpl &rewriterImpl, Block *block, + Block *origBlock, SmallVector, 1> argInfo, + const TypeConverter *converter) + : BlockRewrite(Kind::BlockTypeConversion, rewriterImpl, block), + origBlock(origBlock), argInfo(argInfo), converter(converter) {} static bool classof(const IRRewrite *rewrite) { return rewrite->getKind() == Kind::BlockTypeConversion; } - // TODO: Block type conversions are currently committed in - // `ArgConverter::applyRewrites`. This should be done in the "commit" method. + /// Materialize any necessary conversions for converted arguments that have + /// live users, using the provided `findLiveUser` to search for a user that + /// survives the conversion process. + LogicalResult + materializeLiveConversions(function_ref findLiveUser); + + void commit() override; + void rollback() override; + +private: + /// The original block that was requested to have its signature converted. + Block *origBlock; + + /// The conversion information for each of the arguments. The information is + /// std::nullopt if the argument was dropped during conversion. + SmallVector, 1> argInfo; + + /// The type converter used to convert the arguments. + const TypeConverter *converter; }; /// An operation rewrite. @@ -949,8 +632,8 @@ class MoveOperationRewrite : public OperationRewrite { // The block in which this operation was previously contained. Block *block; - // The original successor of this operation before it was moved. "nullptr" if - // this operation was the only operation in the region. + // The original successor of this operation before it was moved. "nullptr" + // if this operation was the only operation in the region. 
Operation *insertBeforeOp; }; @@ -1027,6 +710,26 @@ static bool hasRewrite(R &&rewrites, Operation *op) { }); } +/// Find the single rewrite object of the specified type and block among the +/// given rewrites. In debug mode, asserts that there is mo more than one such +/// object. Return "nullptr" if no object was found. +template +static RewriteTy *findSingleRewrite(R &&rewrites, Block *block) { + RewriteTy *result = nullptr; + for (auto &rewrite : rewrites) { + auto *rewriteTy = dyn_cast(rewrite.get()); + if (rewriteTy && rewriteTy->getBlock() == block) { +#ifndef NDEBUG + assert(!result && "expected single matching rewrite"); + result = rewriteTy; +#else + return rewriteTy; +#endif // NDEBUG + } + } + return result; +} + //===----------------------------------------------------------------------===// // ConversionPatternRewriterImpl //===----------------------------------------------------------------------===// @@ -1034,7 +737,7 @@ namespace mlir { namespace detail { struct ConversionPatternRewriterImpl : public RewriterBase::Listener { explicit ConversionPatternRewriterImpl(PatternRewriter &rewriter) - : argConverter(rewriter, unresolvedMaterializations), + : rewriter(rewriter), eraseRewriter(rewriter.getContext()), notifyCallback(nullptr) {} /// Cleanup and destroy any generated rewrite operations. This method is @@ -1084,15 +787,33 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener { /// removes them from being considered for legalization. void markNestedOpsIgnored(Operation *op); + /// Detach any operations nested in the given operation from their parent + /// blocks, and erase the given operation. This can be used when the nested + /// operations are scheduled for erasure themselves, so deleting the regions + /// of the given operation together with their content would result in + /// double-free. This happens, for example, when rolling back op creation in + /// the reverse order and if the nested ops were created before the parent op. + /// This function does not need to collect nested ops recursively because it + /// is expected to also be called for each nested op when it is about to be + /// deleted. + void detachNestedAndErase(Operation *op); + //===--------------------------------------------------------------------===// // Type Conversion //===--------------------------------------------------------------------===// - /// Convert the signature of the given block. + /// Attempt to convert the signature of the given block, if successful a new + /// block is returned containing the new arguments. Returns `block` if it did + /// not require conversion. FailureOr convertBlockSignature( Block *block, const TypeConverter *converter, TypeConverter::SignatureConversion *conversion = nullptr); + /// Convert the types of non-entry block arguments within the given region. + LogicalResult convertNonEntryRegionTypes( + Region *region, const TypeConverter &converter, + ArrayRef blockConversions = {}); + /// Apply a signature conversion on the given region, using `converter` for /// materializations if not null. Block * @@ -1105,10 +826,15 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener { convertRegionTypes(Region *region, const TypeConverter &converter, TypeConverter::SignatureConversion *entryConversion); - /// Convert the types of non-entry block arguments within the given region. 
- LogicalResult convertNonEntryRegionTypes( - Region *region, const TypeConverter &converter, - ArrayRef blockConversions = {}); + /// Apply the given signature conversion on the given block. The new block + /// containing the updated signature is returned. If no conversions were + /// necessary, e.g. if the block has no arguments, `block` is returned. + /// `converter` is used to generate any necessary cast operations that + /// translate between the origin argument types and those specified in the + /// signature conversion. + Block *applySignatureConversion( + Block *block, const TypeConverter *converter, + TypeConverter::SignatureConversion &signatureConversion); //===--------------------------------------------------------------------===// // Rewriter Notification Hooks @@ -1140,17 +866,54 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener { notifyMatchFailure(Location loc, function_ref reasonCallback) override; + //===--------------------------------------------------------------------===// + // IR Erasure + //===--------------------------------------------------------------------===// + + /// A rewriter that keeps track of erased ops and blocks. It ensures that no + /// operation or block is erased multiple times. This rewriter assumes that + /// no new IR is created between calls to `eraseOp`/`eraseBlock`. + struct SingleEraseRewriter : public RewriterBase, RewriterBase::Listener { + public: + SingleEraseRewriter(MLIRContext *context) + : RewriterBase(context, /*listener=*/this) {} + + /// Erase the given op (unless it was already erased). + void eraseOp(Operation *op) override { + if (erased.contains(op)) + return; + op->dropAllUses(); + RewriterBase::eraseOp(op); + } + + /// Erase the given block (unless it was already erased). + void eraseBlock(Block *block) override { + if (erased.contains(block)) + return; + block->dropAllDefinedValueUses(); + RewriterBase::eraseBlock(block); + } + + void notifyOperationErased(Operation *op) override { erased.insert(op); } + void notifyBlockErased(Block *block) override { erased.insert(block); } + + /// Pointers to all erased operations and blocks. + SetVector erased; + }; + //===--------------------------------------------------------------------===// // State //===--------------------------------------------------------------------===// + PatternRewriter &rewriter; + + /// This rewriter must be used for erasing ops/blocks. + SingleEraseRewriter eraseRewriter; + // Mapping between replaced values that differ in type. This happens when // replacing a value with one of a different type. ConversionValueMapping mapping; - /// Utility used to convert block arguments. - ArgConverter argConverter; - /// Ordered vector of all of the newly created operations during conversion. SmallVector createdOps; @@ -1207,20 +970,100 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener { } // namespace detail } // namespace mlir +void IRRewrite::eraseOp(Operation *op) { + rewriterImpl.eraseRewriter.eraseOp(op); +} + +void IRRewrite::eraseBlock(Block *block) { + rewriterImpl.eraseRewriter.eraseBlock(block); +} + +void BlockTypeConversionRewrite::commit() { + // Process the remapping for each of the original arguments. + for (auto [origArg, info] : + llvm::zip_equal(origBlock->getArguments(), argInfo)) { + // Handle the case of a 1->0 value mapping. 
+ if (!info) { + if (Value newArg = + rewriterImpl.mapping.lookupOrNull(origArg, origArg.getType())) + origArg.replaceAllUsesWith(newArg); + continue; + } + + // Otherwise this is a 1->1+ value mapping. + Value castValue = info->castValue; + assert(info->newArgSize >= 1 && castValue && "expected 1->1+ mapping"); + + // If the argument is still used, replace it with the generated cast. + if (!origArg.use_empty()) { + origArg.replaceAllUsesWith( + rewriterImpl.mapping.lookupOrDefault(castValue, origArg.getType())); + } + } + + delete origBlock; + origBlock = nullptr; +} + void BlockTypeConversionRewrite::rollback() { - // Undo the type conversion. - rewriterImpl.argConverter.discardRewrites(block); -} - -/// Detach any operations nested in the given operation from their parent -/// blocks, and erase the given operation. This can be used when the nested -/// operations are scheduled for erasure themselves, so deleting the regions of -/// the given operation together with their content would result in double-free. -/// This happens, for example, when rolling back op creation in the reverse -/// order and if the nested ops were created before the parent op. This function -/// does not need to collect nested ops recursively because it is expected to -/// also be called for each nested op when it is about to be deleted. -static void detachNestedAndErase(Operation *op) { + // Drop all uses of the new block arguments and replace uses of the new block. + for (int i = block->getNumArguments() - 1; i >= 0; --i) + block->getArgument(i).dropAllUses(); + block->replaceAllUsesWith(origBlock); + + // Move the operations back the original block, move the original block back + // into its original location and the delete the new block. + origBlock->getOperations().splice(origBlock->end(), block->getOperations()); + block->getParent()->getBlocks().insert(Region::iterator(block), origBlock); + eraseBlock(block); +} + +LogicalResult BlockTypeConversionRewrite::materializeLiveConversions( + function_ref findLiveUser) { + // Process the remapping for each of the original arguments. + for (auto it : llvm::enumerate(origBlock->getArguments())) { + // If the type of this argument changed and the argument is still live, we + // need to materialize a conversion. + BlockArgument origArg = it.value(); + if (rewriterImpl.mapping.lookupOrNull(origArg, origArg.getType())) + continue; + Operation *liveUser = findLiveUser(origArg); + if (!liveUser) + continue; + + Value replacementValue = rewriterImpl.mapping.lookupOrDefault(origArg); + bool isDroppedArg = replacementValue == origArg; + if (isDroppedArg) + rewriterImpl.rewriter.setInsertionPointToStart(getBlock()); + else + rewriterImpl.rewriter.setInsertionPointAfterValue(replacementValue); + Value newArg; + if (converter) { + newArg = converter->materializeSourceConversion( + rewriterImpl.rewriter, origArg.getLoc(), origArg.getType(), + isDroppedArg ? 
ValueRange() : ValueRange(replacementValue)); + assert((!newArg || newArg.getType() == origArg.getType()) && + "materialization hook did not provide a value of the expected " + "type"); + } + if (!newArg) { + InFlightDiagnostic diag = + emitError(origArg.getLoc()) + << "failed to materialize conversion for block argument #" + << it.index() << " that remained live after conversion, type was " + << origArg.getType(); + if (!isDroppedArg) + diag << ", with target type " << replacementValue.getType(); + diag.attachNote(liveUser->getLoc()) + << "see existing live user here: " << *liveUser; + return failure(); + } + rewriterImpl.mapping.map(origArg, newArg); + } + return success(); +} + +void ConversionPatternRewriterImpl::detachNestedAndErase(Operation *op) { for (Region ®ion : op->getRegions()) { for (Block &block : region.getBlocks()) { while (!block.getOperations().empty()) @@ -1228,8 +1071,7 @@ static void detachNestedAndErase(Operation *op) { block.dropAllDefinedValueUses(); } } - op->dropAllUses(); - op->erase(); + eraseRewriter.eraseOp(op); } void ConversionPatternRewriterImpl::discardRewrites() { @@ -1248,11 +1090,6 @@ void ConversionPatternRewriterImpl::applyRewrites() { for (OpResult result : repl.first->getResults()) if (Value newValue = mapping.lookupOrNull(result, result.getType())) result.replaceAllUsesWith(newValue); - - // If this operation defines any regions, drop any pending argument - // rewrites. - if (repl.first->getNumRegions()) - argConverter.notifyOpRemoved(repl.first); } // Apply all of the requested argument replacements. @@ -1279,22 +1116,16 @@ void ConversionPatternRewriterImpl::applyRewrites() { // Drop all of the unresolved materialization operations created during // conversion. - for (auto &mat : unresolvedMaterializations) { - mat.getOp()->dropAllUses(); - mat.getOp()->erase(); - } + for (auto &mat : unresolvedMaterializations) + eraseRewriter.eraseOp(mat.getOp()); // In a second pass, erase all of the replaced operations in reverse. This // allows processing nested operations before their parent region is // destroyed. Because we process in reverse order, producers may be deleted // before their users (a pattern deleting a producer and then the consumer) // so we first drop all uses explicitly. - for (auto &repl : llvm::reverse(replacements)) { - repl.first->dropAllUses(); - repl.first->erase(); - } - - argConverter.applyRewrites(mapping); + for (auto &repl : llvm::reverse(replacements)) + eraseRewriter.eraseOp(repl.first); // Commit all rewrites. 
for (auto &rewrite : rewrites) @@ -1307,7 +1138,8 @@ void ConversionPatternRewriterImpl::applyRewrites() { RewriterState ConversionPatternRewriterImpl::getCurrentState() { return RewriterState(createdOps.size(), unresolvedMaterializations.size(), replacements.size(), argReplacements.size(), - rewrites.size(), ignoredOps.size()); + rewrites.size(), ignoredOps.size(), + eraseRewriter.erased.size()); } void ConversionPatternRewriterImpl::resetState(RewriterState state) { @@ -1355,6 +1187,9 @@ void ConversionPatternRewriterImpl::resetState(RewriterState state) { while (!operationsWithChangedResults.empty() && operationsWithChangedResults.back() >= state.numReplacements) operationsWithChangedResults.pop_back(); + + while (eraseRewriter.erased.size() != state.numErased) + eraseRewriter.erased.pop_back(); } void ConversionPatternRewriterImpl::undoRewrites(unsigned numRewritesToKeep) { @@ -1443,18 +1278,18 @@ void ConversionPatternRewriterImpl::markNestedOpsIgnored(Operation *op) { FailureOr ConversionPatternRewriterImpl::convertBlockSignature( Block *block, const TypeConverter *converter, TypeConverter::SignatureConversion *conversion) { - FailureOr result = - conversion ? argConverter.applySignatureConversion( - block, converter, *conversion, mapping, argReplacements) - : argConverter.convertSignature(block, converter, mapping, - argReplacements); - if (failed(result)) + if (conversion) + return applySignatureConversion(block, converter, *conversion); + + // If a converter wasn't provided, and the block wasn't already converted, + // there is nothing we can do. + if (!converter) return failure(); - if (Block *newBlock = *result) { - if (newBlock != block) - appendRewrite(newBlock); - } - return result; + + // Try to convert the signature for the block with the provided converter. + if (auto conversion = converter->convertBlockSignature(block)) + return applySignatureConversion(block, converter, *conversion); + return failure(); } Block *ConversionPatternRewriterImpl::applySignatureConversion( @@ -1508,6 +1343,102 @@ LogicalResult ConversionPatternRewriterImpl::convertNonEntryRegionTypes( return success(); } +Block *ConversionPatternRewriterImpl::applySignatureConversion( + Block *block, const TypeConverter *converter, + TypeConverter::SignatureConversion &signatureConversion) { + // If no arguments are being changed or added, there is nothing to do. + unsigned origArgCount = block->getNumArguments(); + auto convertedTypes = signatureConversion.getConvertedTypes(); + if (llvm::equal(block->getArgumentTypes(), convertedTypes)) + return block; + + // Split the block at the beginning to get a new block to use for the updated + // signature. + Block *newBlock = block->splitBlock(block->begin()); + block->replaceAllUsesWith(newBlock); + // Unlink the block, but do not erase it yet, so that the change can be rolled + // back. + block->getParent()->getBlocks().remove(block); + + // Map all new arguments to the location of the argument they originate from. 
+ SmallVector newLocs(convertedTypes.size(), + rewriter.getUnknownLoc()); + for (unsigned i = 0; i < origArgCount; ++i) { + auto inputMap = signatureConversion.getInputMapping(i); + if (!inputMap || inputMap->replacementValue) + continue; + Location origLoc = block->getArgument(i).getLoc(); + for (unsigned j = 0; j < inputMap->size; ++j) + newLocs[inputMap->inputNo + j] = origLoc; + } + + SmallVector newArgRange( + newBlock->addArguments(convertedTypes, newLocs)); + ArrayRef newArgs(newArgRange); + + // Remap each of the original arguments as determined by the signature + // conversion. + SmallVector, 1> argInfo; + argInfo.resize(origArgCount); + + OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(newBlock); + for (unsigned i = 0; i != origArgCount; ++i) { + auto inputMap = signatureConversion.getInputMapping(i); + if (!inputMap) + continue; + BlockArgument origArg = block->getArgument(i); + + // If inputMap->replacementValue is not nullptr, then the argument is + // dropped and a replacement value is provided to be the remappedValue. + if (inputMap->replacementValue) { + assert(inputMap->size == 0 && + "invalid to provide a replacement value when the argument isn't " + "dropped"); + mapping.map(origArg, inputMap->replacementValue); + argReplacements.push_back(origArg); + continue; + } + + // Otherwise, this is a 1->1+ mapping. + auto replArgs = newArgs.slice(inputMap->inputNo, inputMap->size); + Value newArg; + + // If this is a 1->1 mapping and the types of new and replacement arguments + // match (i.e. it's an identity map), then the argument is mapped to its + // original type. + // FIXME: We simply pass through the replacement argument if there wasn't a + // converter, which isn't great as it allows implicit type conversions to + // appear. We should properly restructure this code to handle cases where a + // converter isn't provided and also to properly handle the case where an + // argument materialization is actually a temporary source materialization + // (e.g. in the case of 1->N). + if (replArgs.size() == 1 && + (!converter || replArgs[0].getType() == origArg.getType())) { + newArg = replArgs.front(); + } else { + Type origOutputType = origArg.getType(); + + // Legalize the argument output type. + Type outputType = origOutputType; + if (Type legalOutputType = converter->convertType(outputType)) + outputType = legalOutputType; + + newArg = buildUnresolvedArgumentMaterialization( + rewriter, origArg.getLoc(), replArgs, origOutputType, outputType, + converter, unresolvedMaterializations); + } + + mapping.map(origArg, newArg); + argReplacements.push_back(origArg); + argInfo[i] = ConvertedArgInfo(inputMap->inputNo, inputMap->size, newArg); + } + + appendRewrite(newBlock, block, argInfo, + converter); + return newBlock; +} + //===----------------------------------------------------------------------===// // Rewriter Notification Hooks @@ -2635,8 +2566,11 @@ LogicalResult OperationConverter::legalizeConvertedArgumentTypes( }); return liveUserIt == val.user_end() ? nullptr : *liveUserIt; }; - return rewriterImpl.argConverter.materializeLiveConversions( - rewriterImpl.mapping, rewriter, findLiveUser); + for (auto &r : rewriterImpl.rewrites) + if (auto *rewrite = dyn_cast(r.get())) + if (failed(rewrite->materializeLiveConversions(findLiveUser))) + return failure(); + return success(); } /// Replace the results of a materialization operation with the given values. 
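To see how the signature-conversion path refactored above is typically driven, the sketch below shows a conversion pattern that remaps the entry-block arguments of a region through the public ConversionPatternRewriter API. This is a minimal illustration only, not part of the patch: the op `mydialect::KernelOp` and the pattern name are hypothetical, the pattern is assumed to have a TypeConverter attached, and the in-place root-update call is spelled `modifyOpInPlace` here (older trees use `updateRootInPlace`); the exact idiom for signaling the root update varies across upstream patterns.

```
// Minimal sketch (assumptions noted above): a pattern that only converts the
// entry-block signature of a hypothetical single-region op. The call to
// applySignatureConversion() is what reaches
// ConversionPatternRewriterImpl::applySignatureConversion() and records a
// BlockTypeConversionRewrite that can later be committed or rolled back.
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

struct KernelSignatureConversion
    : public OpConversionPattern<mydialect::KernelOp> {
  using OpConversionPattern::OpConversionPattern;

  LogicalResult
  matchAndRewrite(mydialect::KernelOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    const TypeConverter *converter = getTypeConverter();
    Region &body = op->getRegion(0);
    Block &entry = body.front();

    // Describe how each original entry-block argument maps onto the converted
    // argument list (1->1, 1->N, or dropped with a replacement value).
    TypeConverter::SignatureConversion signature(entry.getNumArguments());
    if (failed(converter->convertSignatureArgs(entry.getArgumentTypes(),
                                               signature)))
      return failure();

    // Route the change through the rewriter so it is recorded as a rewrite
    // object and can be rolled back if conversion later fails; the wrapper
    // also marks the matched root as updated in place.
    rewriter.modifyOpInPlace(op, [&] {
      rewriter.applySignatureConversion(&body, signature, converter);
    });
    return success();
  }
};
```

Patterns of this shape are what ultimately create the BlockTypeConversionRewrite objects whose commit() and rollback() behavior the hunks above implement.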
From fddf23c6f4478fc39b0077538d288082f983ce80 Mon Sep 17 00:00:00 2001 From: Vyacheslav Levytskyy <89994100+VyacheslavLevytskyy@users.noreply.github.com> Date: Thu, 22 Feb 2024 10:27:59 +0100 Subject: [PATCH 09/19] [SPIRV] Add support for the SPV_KHR_subgroup_rotate extension (#82374) This PR adds support for the SPV_KHR_subgroup_rotate extension that enables rotating values across invocations within a subgroup: * https://github.com/KhronosGroup/SPIRV-Registry/blob/main/extensions/KHR/SPV_KHR_subgroup_rotate.asciidoc --- llvm/lib/Target/SPIRV/SPIRVBuiltins.td | 7 +- llvm/lib/Target/SPIRV/SPIRVInstrInfo.td | 5 + llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp | 9 + llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp | 4 + .../lib/Target/SPIRV/SPIRVSymbolicOperands.td | 1 + .../subgroup-rotate.ll | 357 ++++++++++++++++++ 6 files changed, 382 insertions(+), 1 deletion(-) create mode 100644 llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td index e6e3560d02f58b..28a63b93b43b6e 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td @@ -619,7 +619,8 @@ class GroupBuiltin { !eq(operation, OpGroupNonUniformShuffleDown), !eq(operation, OpGroupBroadcast), !eq(operation, OpGroupNonUniformBroadcast), - !eq(operation, OpGroupNonUniformBroadcastFirst)); + !eq(operation, OpGroupNonUniformBroadcastFirst), + !eq(operation, OpGroupNonUniformRotateKHR)); bit HasBoolArg = !or(!and(IsAllOrAny, !eq(IsAllEqual, false)), IsBallot, IsLogical); } @@ -877,6 +878,10 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_logical_xors", Wo defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_logical_xors", WorkOrSub, OpGroupNonUniformLogicalXor>; defm : DemangledGroupBuiltin<"group_clustered_reduce_logical_xor", WorkOrSub, OpGroupNonUniformLogicalXor>; +// cl_khr_subgroup_rotate / SPV_KHR_subgroup_rotate +defm : DemangledGroupBuiltin<"group_rotate", OnlySub, OpGroupNonUniformRotateKHR>; +defm : DemangledGroupBuiltin<"group_clustered_rotate", OnlySub, OpGroupNonUniformRotateKHR>; + // cl_khr_work_group_uniform_arithmetic / SPV_KHR_uniform_group_instructions defm : DemangledGroupBuiltin<"group_reduce_imul", OnlyWork, OpGroupIMulKHR>; defm : DemangledGroupBuiltin<"group_reduce_mulu", OnlyWork, OpGroupIMulKHR>; diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td index 0f11bc34d176f7..86f65b6320d530 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td +++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td @@ -765,6 +765,11 @@ def OpGroupNonUniformLogicalAnd: OpGroupNUGroup<"LogicalAnd", 362>; def OpGroupNonUniformLogicalOr: OpGroupNUGroup<"LogicalOr", 363>; def OpGroupNonUniformLogicalXor: OpGroupNUGroup<"LogicalXor", 364>; +// SPV_KHR_subgroup_rotate +def OpGroupNonUniformRotateKHR: Op<4431, (outs ID:$res), + (ins TYPE:$type, ID:$scope, ID:$value, ID:$delta, variable_ops), + "$res = OpGroupNonUniformRotateKHR $type $scope $value $delta">; + // 3.49.7, Constant-Creation Instructions // - SPV_INTEL_function_pointers diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp index dbda2871e153de..9b9575b9879948 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp @@ -1069,6 +1069,15 @@ void addInstrRequirements(const MachineInstr &MI, Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL); } break; + case 
SPIRV::OpGroupNonUniformRotateKHR: + if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate)) + report_fatal_error("OpGroupNonUniformRotateKHR instruction requires the " + "following SPIR-V extension: SPV_KHR_subgroup_rotate", + false); + Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate); + Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR); + Reqs.addCapability(SPIRV::Capability::GroupNonUniform); + break; case SPIRV::OpGroupIMulKHR: case SPIRV::OpGroupFMulKHR: case SPIRV::OpGroupBitwiseAndKHR: diff --git a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp index e186154aa408bd..4694363614ef60 100644 --- a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp @@ -75,6 +75,10 @@ cl::list Extensions( "Allows to use the LinkOnceODR linkage type that is to let " "a function or global variable to be merged with other functions " "or global variables of the same name when linkage occurs."), + clEnumValN(SPIRV::Extension::SPV_KHR_subgroup_rotate, + "SPV_KHR_subgroup_rotate", + "Adds a new instruction that enables rotating values across " + "invocations within a subgroup."), clEnumValN(SPIRV::Extension::SPV_INTEL_function_pointers, "SPV_INTEL_function_pointers", "Allows translation of function pointers."))); diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td index 4e5ac0d531b2d5..6c36087baa85ed 100644 --- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td +++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td @@ -455,6 +455,7 @@ defm BitInstructions : CapabilityOperand<6025, 0, 0, [SPV_KHR_bit_instructions], defm ExpectAssumeKHR : CapabilityOperand<5629, 0, 0, [SPV_KHR_expect_assume], []>; defm FunctionPointersINTEL : CapabilityOperand<5603, 0, 0, [SPV_INTEL_function_pointers], []>; defm IndirectReferencesINTEL : CapabilityOperand<5604, 0, 0, [SPV_INTEL_function_pointers], []>; +defm GroupNonUniformRotateKHR : CapabilityOperand<6026, 0, 0, [SPV_KHR_subgroup_rotate], [GroupNonUniform]>; defm AtomicFloat32AddEXT : CapabilityOperand<6033, 0, 0, [SPV_EXT_shader_atomic_float_add], []>; defm AtomicFloat64AddEXT : CapabilityOperand<6034, 0, 0, [SPV_EXT_shader_atomic_float_add], []>; defm AtomicFloat16AddEXT : CapabilityOperand<6095, 0, 0, [SPV_EXT_shader_atomic_float16_add], []>; diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll new file mode 100644 index 00000000000000..b1d6a09c7fe35b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll @@ -0,0 +1,357 @@ +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - | FileCheck %s +; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %} + +; CHECK-ERROR: LLVM ERROR: OpGroupNonUniformRotateKHR instruction requires the following SPIR-V extension: SPV_KHR_subgroup_rotate + +; CHECK: OpCapability GroupNonUniformRotateKHR +; CHECK: OpExtension "SPV_KHR_subgroup_rotate" + +; CHECK-DAG: %[[TyInt8:.*]] = OpTypeInt 8 0 +; CHECK-DAG: %[[TyInt16:.*]] = OpTypeInt 16 0 +; CHECK-DAG: %[[TyInt32:.*]] = OpTypeInt 32 0 +; CHECK-DAG: %[[TyInt64:.*]] = OpTypeInt 64 0 +; CHECK-DAG: %[[TyFloat:.*]] = OpTypeFloat 32 +; 
CHECK-DAG: %[[TyHalf:.*]] = OpTypeFloat 16 +; CHECK-DAG: %[[TyDouble:.*]] = OpTypeFloat 64 +; CHECK-DAG: %[[ScopeSubgroup:.*]] = OpConstant %[[TyInt32]] 3 +; CHECK-DAG: %[[ConstInt2:.*]] = OpConstant %[[TyInt32]] 2 +; CHECK-DAG: %[[ConstInt4:.*]] = OpConstant %[[TyInt32]] 4 + +target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024" +target triple = "spir" + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateChar(ptr addrspace(1) noundef align 1 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !5 !kernel_arg_base_type !5 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i8, align 1 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i8 0, ptr %v, align 1 + %value = load i8, ptr %v, align 1 +; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func signext i8 @_Z16sub_group_rotateci(i8 noundef signext %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %data, i32 0 + store i8 %call, ptr addrspace(1) %arrayidx, align 1 + %value_clustered = load i8, ptr %v, align 1 +; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func signext i8 @_Z26sub_group_clustered_rotatecij(i8 noundef signext %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %data2, i32 1 + store i8 %call1, ptr addrspace(1) %arrayidx2, align 1 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func signext i8 @_Z16sub_group_rotateci(i8 noundef signext, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func signext i8 @_Z26sub_group_clustered_rotatecij(i8 noundef signext, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateUChar(ptr addrspace(1) noundef align 1 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !7 !kernel_arg_base_type !7 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i8, align 1 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i8 0, ptr %v, align 1 + %value = load i8, ptr %v, align 1 +; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func zeroext i8 @_Z16sub_group_rotatehi(i8 noundef zeroext %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %data, i32 0 + store i8 %call, ptr addrspace(1) %arrayidx, align 1 + %value_clustered = load i8, ptr %v, align 1 +; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func zeroext i8 @_Z26sub_group_clustered_rotatehij(i8 noundef zeroext %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %data2, i32 1 + store i8 %call1, ptr addrspace(1) %arrayidx2, align 1 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func zeroext i8 @_Z16sub_group_rotatehi(i8 noundef zeroext, i32 noundef) #1 + +; 
Function Attrs: convergent nounwind +declare spir_func zeroext i8 @_Z26sub_group_clustered_rotatehij(i8 noundef zeroext, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateShort(ptr addrspace(1) noundef align 2 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !8 !kernel_arg_base_type !8 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i16, align 2 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i16 0, ptr %v, align 2 + %value = load i16, ptr %v, align 2 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func signext i16 @_Z16sub_group_rotatesi(i16 noundef signext %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %data, i32 0 + store i16 %call, ptr addrspace(1) %arrayidx, align 2 + %value_clustered = load i16, ptr %v, align 2 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func signext i16 @_Z26sub_group_clustered_rotatesij(i16 noundef signext %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i16, ptr addrspace(1) %data2, i32 1 + store i16 %call1, ptr addrspace(1) %arrayidx2, align 2 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func signext i16 @_Z16sub_group_rotatesi(i16 noundef signext, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func signext i16 @_Z26sub_group_clustered_rotatesij(i16 noundef signext, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateUShort(ptr addrspace(1) noundef align 2 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !9 !kernel_arg_base_type !9 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i16, align 2 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i16 0, ptr %v, align 2 + %value = load i16, ptr %v, align 2 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func zeroext i16 @_Z16sub_group_rotateti(i16 noundef zeroext %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %data, i32 0 + store i16 %call, ptr addrspace(1) %arrayidx, align 2 + %value_clustered = load i16, ptr %v, align 2 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func zeroext i16 @_Z26sub_group_clustered_rotatetij(i16 noundef zeroext %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i16, ptr addrspace(1) %data2, i32 1 + store i16 %call1, ptr addrspace(1) %arrayidx2, align 2 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func zeroext i16 @_Z16sub_group_rotateti(i16 noundef zeroext, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func zeroext i16 @_Z26sub_group_clustered_rotatetij(i16 noundef zeroext, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define 
dso_local spir_kernel void @testRotateInt(ptr addrspace(1) noundef align 4 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !10 !kernel_arg_base_type !10 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i32, align 4 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i32 0, ptr %v, align 4 + %value = load i32, ptr %v, align 4 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func i32 @_Z16sub_group_rotateii(i32 noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %data, i32 0 + store i32 %call, ptr addrspace(1) %arrayidx, align 4 + %value_clustered = load i32, ptr %v, align 4 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func i32 @_Z26sub_group_clustered_rotateiij(i32 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %data2, i32 1 + store i32 %call1, ptr addrspace(1) %arrayidx2, align 4 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func i32 @_Z16sub_group_rotateii(i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func i32 @_Z26sub_group_clustered_rotateiij(i32 noundef, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateUInt(ptr addrspace(1) noundef align 4 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !11 !kernel_arg_base_type !11 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i32, align 4 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i32 0, ptr %v, align 4 + %value = load i32, ptr %v, align 4 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func i32 @_Z16sub_group_rotateji(i32 noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %data, i32 0 + store i32 %call, ptr addrspace(1) %arrayidx, align 4 + %value_clustered = load i32, ptr %v, align 4 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func i32 @_Z26sub_group_clustered_rotatejij(i32 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %data2, i32 1 + store i32 %call1, ptr addrspace(1) %arrayidx2, align 4 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func i32 @_Z16sub_group_rotateji(i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func i32 @_Z26sub_group_clustered_rotatejij(i32 noundef, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateLong(ptr addrspace(1) noundef align 8 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !12 !kernel_arg_base_type !12 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i64, align 8 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store 
i64 0, ptr %v, align 8 + %value = load i64, ptr %v, align 8 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func i64 @_Z16sub_group_rotateli(i64 noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i64, ptr addrspace(1) %data, i32 0 + store i64 %call, ptr addrspace(1) %arrayidx, align 8 + %value_clustered = load i64, ptr %v, align 8 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func i64 @_Z26sub_group_clustered_rotatelij(i64 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i64, ptr addrspace(1) %data2, i32 1 + store i64 %call1, ptr addrspace(1) %arrayidx2, align 8 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func i64 @_Z16sub_group_rotateli(i64 noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func i64 @_Z26sub_group_clustered_rotatelij(i64 noundef, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateULong(ptr addrspace(1) noundef align 8 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !13 !kernel_arg_base_type !13 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca i64, align 8 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store i64 0, ptr %v, align 8 + %value = load i64, ptr %v, align 8 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func i64 @_Z16sub_group_rotatemi(i64 noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds i64, ptr addrspace(1) %data, i32 0 + store i64 %call, ptr addrspace(1) %arrayidx, align 8 + %value_clustered = load i64, ptr %v, align 8 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func i64 @_Z26sub_group_clustered_rotatemij(i64 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds i64, ptr addrspace(1) %data2, i32 1 + store i64 %call1, ptr addrspace(1) %arrayidx2, align 8 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func i64 @_Z16sub_group_rotatemi(i64 noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func i64 @_Z26sub_group_clustered_rotatemij(i64 noundef, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateFloat(ptr addrspace(1) noundef align 4 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !14 !kernel_arg_base_type !14 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca float, align 4 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store float 0.000000e+00, ptr %v, align 4 + %value = load float, ptr %v, align 4 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyFloat]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func float @_Z16sub_group_rotatefi(float noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr 
inbounds float, ptr addrspace(1) %data, i32 0 + store float %call, ptr addrspace(1) %arrayidx, align 4 + %value_clustered = load float, ptr %v, align 4 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyFloat]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func float @_Z26sub_group_clustered_rotatefij(float noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds float, ptr addrspace(1) %data2, i32 1 + store float %call1, ptr addrspace(1) %arrayidx2, align 4 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func float @_Z16sub_group_rotatefi(float noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func float @_Z26sub_group_clustered_rotatefij(float noundef, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateHalf(ptr addrspace(1) noundef align 2 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !15 !kernel_arg_base_type !15 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca half, align 2 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store half 0xH0000, ptr %v, align 2 + %value = load half, ptr %v, align 2 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyHalf]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func half @_Z16sub_group_rotateDhi(half noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds half, ptr addrspace(1) %data, i32 0 + store half %call, ptr addrspace(1) %arrayidx, align 2 + %value_clustered = load half, ptr %v, align 2 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyHalf]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call spir_func half @_Z26sub_group_clustered_rotateDhij(half noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds half, ptr addrspace(1) %data2, i32 1 + store half %call1, ptr addrspace(1) %arrayidx2, align 2 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func half @_Z16sub_group_rotateDhi(half noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func half @_Z26sub_group_clustered_rotateDhij(half noundef, i32 noundef, i32 noundef) #1 + +; Function Attrs: convergent noinline norecurse nounwind optnone +define dso_local spir_kernel void @testRotateDouble(ptr addrspace(1) noundef align 8 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !16 !kernel_arg_base_type !16 !kernel_arg_type_qual !6 { +entry: + %dst.addr = alloca ptr addrspace(1), align 4 + %v = alloca double, align 8 + store ptr addrspace(1) %dst, ptr %dst.addr, align 4 + store double 0.000000e+00, ptr %v, align 8 + %value = load double, ptr %v, align 8 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyDouble]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] + %call = call spir_func double @_Z16sub_group_rotatedi(double noundef %value, i32 noundef 2) #2 + %data = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx = getelementptr inbounds double, ptr addrspace(1) %data, i32 0 + store double %call, ptr addrspace(1) %arrayidx, align 8 + %value_clustered = load double, ptr %v, align 8 + ; CHECK: OpGroupNonUniformRotateKHR %[[TyDouble]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]] + %call1 = call 
spir_func double @_Z26sub_group_clustered_rotatedij(double noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2 + %data2 = load ptr addrspace(1), ptr %dst.addr, align 4 + %arrayidx2 = getelementptr inbounds double, ptr addrspace(1) %data2, i32 1 + store double %call1, ptr addrspace(1) %arrayidx2, align 8 + ret void +} + +; Function Attrs: convergent nounwind +declare spir_func double @_Z16sub_group_rotatedi(double noundef, i32 noundef) #1 + +; Function Attrs: convergent nounwind +declare spir_func double @_Z26sub_group_clustered_rotatedij(double noundef, i32 noundef, i32 noundef) #1 + +attributes #0 = { convergent noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="false" } +attributes #1 = { convergent nounwind "no-trapping-math"="true" "stack-protector-buffer-size"="8" } +attributes #2 = { convergent nounwind } + +!llvm.module.flags = !{!0} +!opencl.ocl.version = !{!1} +!opencl.spir.version = !{!1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 2, i32 0} +!2 = !{!"clang version 19.0.0"} +!3 = !{i32 1} +!4 = !{!"none"} +!5 = !{!"char*"} +!6 = !{!""} +!7 = !{!"uchar*"} +!8 = !{!"short*"} +!9 = !{!"ushort*"} +!10 = !{!"int*"} +!11 = !{!"uint*"} +!12 = !{!"long*"} +!13 = !{!"ulong*"} +!14 = !{!"float*"} +!15 = !{!"half*"} +!16 = !{!"double*"} From 6cca23a3b91e12c0b6639449bc1e5eb564067db3 Mon Sep 17 00:00:00 2001 From: Vyacheslav Levytskyy <89994100+VyacheslavLevytskyy@users.noreply.github.com> Date: Thu, 22 Feb 2024 10:30:00 +0100 Subject: [PATCH 10/19] [SPIRV] Prevent creation of jump tables from switch (#82287) This PR is to prevent creation of jump tables from switch. The reason is that SPIR-V doesn't know how to lower jump tables, and a sequence of commands that IRTranslator generates for switch via jump tables breaks SPIR-V Backend code generation with complains to G_BRJT. The next example is the shortest code to break SPIR-V Backend code generation in this way: ``` target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" target triple = "spir64-unknown-unknown" define spir_func void @foo(i32 noundef %val) { entry: switch i32 %val, label %sw.epilog [ i32 0, label %sw.bb i32 1, label %sw.bb2 i32 2, label %sw.bb3 i32 3, label %sw.bb4 ] sw.bb: br label %sw.epilog sw.bb2: br label %sw.epilog sw.bb3: br label %sw.epilog sw.bb4: br label %sw.epilog sw.epilog: ret void } ``` To resolve the issue we set a high lower limit for number of blocks in a jump table via getMinimumJumpTableEntries() and prevent undesirable (or rather unsupported at the moment) path of code generation. --- llvm/lib/Target/SPIRV/SPIRVISelLowering.h | 3 ++ .../CodeGen/SPIRV/switch-no-jump-table.ll | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.h b/llvm/lib/Target/SPIRV/SPIRVISelLowering.h index f317b262071954..d34f802e9d889f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.h +++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.h @@ -31,6 +31,9 @@ class SPIRVTargetLowering : public TargetLowering { return true; } + // prevent creation of jump tables + bool areJTsAllowed(const Function *) const override { return false; } + // This is to prevent sexts of non-i64 vector indices which are generated // within general IRTranslator hence type generation for it is omitted. 
MVT getVectorIdxTy(const DataLayout &DL) const override { diff --git a/llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll b/llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll new file mode 100644 index 00000000000000..c9c0f17f0b91ef --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll @@ -0,0 +1,30 @@ +; The test is to check that jump tables are not generated from switch + +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpSwitch %[[#]] %[[Label:]] +; CHECK-4: OpBranch %[[Label]] + +target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" +target triple = "spir64-unknown-unknown" + +define spir_func void @foo(i32 noundef %val) { +entry: + switch i32 %val, label %sw.epilog [ + i32 0, label %sw.bb + i32 1, label %sw.bb2 + i32 2, label %sw.bb3 + i32 3, label %sw.bb4 + ] +sw.bb: + br label %sw.epilog +sw.bb2: + br label %sw.epilog +sw.bb3: + br label %sw.epilog +sw.bb4: + br label %sw.epilog +sw.epilog: + ret void +} From bcbffd99c48ed0cabd1b94e9ff252680f0968fc3 Mon Sep 17 00:00:00 2001 From: Jay Foad Date: Thu, 22 Feb 2024 09:40:46 +0000 Subject: [PATCH 11/19] [AMDGPU] Split Dpp8FI and Dpp16FI operands (#82379) Split Dpp8FI and Dpp16FI into two different operands sharing an AsmOperandClass. They are parsed and rendered identically as fi:1 but the encoding is different: for DPP16 FI is a single bit, but for DPP8 it uses two different special values in the src0 field. Having a dedicated decoder for Dpp8FI allows it to reject other (non-special) src0 values so that AMDGPUDisassembler::getInstruction no longer needs to call isValidDPP8 to do post hoc validation of decoded DPP8 instructions. --- .../Disassembler/AMDGPUDisassembler.cpp | 33 ++++++++----------- .../AMDGPU/Disassembler/AMDGPUDisassembler.h | 1 + llvm/lib/Target/AMDGPU/SIInstrInfo.td | 19 ++++++----- llvm/lib/Target/AMDGPU/VOP1Instructions.td | 4 +-- llvm/lib/Target/AMDGPU/VOP2Instructions.td | 18 +++++----- llvm/lib/Target/AMDGPU/VOP3Instructions.td | 8 ++--- llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 4 +-- llvm/lib/Target/AMDGPU/VOPCInstructions.td | 2 +- 8 files changed, 43 insertions(+), 46 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index 894607dfdd8c4c..53abb3e3f9aea8 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -119,6 +119,12 @@ static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val, return addOperand(Inst, DAsm->decodeSplitBarrier(Val)); } +static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr, + const MCDisassembler *Decoder) { + auto DAsm = static_cast(Decoder); + return addOperand(Inst, DAsm->decodeDpp8FI(Val)); +} + #define DECODE_OPERAND(StaticDecoderName, DecoderName) \ static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm, \ uint64_t /*Addr*/, \ @@ -440,19 +446,6 @@ static inline DecoderUInt128 eat12Bytes(ArrayRef &Bytes) { return DecoderUInt128(Lo, Hi); } -// The disassembler is greedy, so we need to check FI operand value to -// not parse a dpp if the correct literal is not set. 
For dpp16 the -// autogenerated decoder checks the dpp literal -static bool isValidDPP8(const MCInst &MI) { - using namespace llvm::AMDGPU::DPP; - int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi); - assert(FiIdx != -1); - if ((unsigned)FiIdx >= MI.getNumOperands()) - return false; - unsigned Fi = MI.getOperand(FiIdx).getImm(); - return Fi == DPP8_FI_0 || Fi == DPP8_FI_1; -} - DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, ArrayRef Bytes_, uint64_t Address, @@ -474,13 +467,11 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, MI, DecW, Address, CS); if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) break; - MI = MCInst(); // clear Res = tryDecodeInst(DecoderTableDPP8GFX1296, DecoderTableDPP8GFX12_FAKE1696, MI, DecW, Address, CS); if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) break; - MI = MCInst(); // clear const auto convertVOPDPP = [&]() { if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P) { @@ -530,26 +521,22 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, break; if (convertDPP8Inst(MI) == MCDisassembler::Success) break; - MI = MCInst(); // clear } } Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address, CS); if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) break; - MI = MCInst(); // clear Res = tryDecodeInst(DecoderTableDPP8GFX1164, DecoderTableDPP8GFX11_FAKE1664, MI, QW, Address, CS); if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) break; - MI = MCInst(); // clear Res = tryDecodeInst(DecoderTableDPP8GFX1264, DecoderTableDPP8GFX12_FAKE1664, MI, QW, Address, CS); if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) break; - MI = MCInst(); // clear Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address, CS); if (Res) break; @@ -982,7 +969,7 @@ DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const { AMDGPU::OpName::src1_modifiers); } } - return isValidDPP8(MI) ? 
MCDisassembler::Success : MCDisassembler::SoftFail; + return MCDisassembler::Success; } DecodeStatus AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const { @@ -1831,6 +1818,12 @@ MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const { return decodeSrcOp(OPW32, Val); } +MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const { + if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1) + return MCOperand(); + return MCOperand::createImm(Val); +} + bool AMDGPUDisassembler::isVI() const { return STI.hasFeature(AMDGPU::FeatureVolcanicIslands); } diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h index 3142b8a14a4dd5..dd0581576bd22e 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h @@ -261,6 +261,7 @@ class AMDGPUDisassembler : public MCDisassembler { MCOperand decodeBoolReg(unsigned Val) const; MCOperand decodeSplitBarrier(unsigned Val) const; + MCOperand decodeDpp8FI(unsigned Val) const; int getTTmpIdx(unsigned Val) const; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td index 97c723752b70b9..34cdb09b0e15da 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td @@ -987,8 +987,8 @@ def SDWAVopcDst : BoolRC { } class NamedIntOperand - : CustomOperand { + string name = NAME, string ConvertMethod = "nullptr"> + : CustomOperand { let ParserMethod = "[this](OperandVector &Operands) -> ParseStatus { "# "return parseIntWithPrefix(\""#Prefix#"\", Operands, "# @@ -1090,9 +1090,12 @@ let DefaultValue = "0xf" in { def DppRowMask : NamedIntOperand; def DppBankMask : NamedIntOperand; } -def DppBoundCtrl : NamedIntOperand bool { return convertDppBoundCtrl(BC); }">; -def DppFI : NamedIntOperand; + +let DecoderMethod = "decodeDpp8FI" in +def Dpp8FI : NamedIntOperand; +def Dpp16FI : NamedIntOperand; def blgp : CustomOperand; def CBSZ : NamedIntOperand; @@ -1823,7 +1826,7 @@ class getInsDPP16 { dag ret = !con(getInsDPP.ret, - (ins DppFI:$fi)); + (ins Dpp16FI:$fi)); } class getInsDPP8 { dag ret = !con(getInsDPPBase.ret, - (ins dpp8:$dpp8, DppFI:$fi)); + (ins dpp8:$dpp8, Dpp8FI:$fi)); } class getInsVOP3DPPBase { @@ -1851,12 +1854,12 @@ class getInsVOP3DPP { dag ret = !con(getInsVOP3DPP.ret, - (ins DppFI:$fi)); + (ins Dpp16FI:$fi)); } class getInsVOP3DPP8 { dag ret = !con(getInsVOP3DPPBase.ret, - (ins dpp8:$dpp8, DppFI:$fi)); + (ins dpp8:$dpp8, Dpp8FI:$fi)); } // Ins for SDWA diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td index 99f8e8ede4ace9..576ad32a70cf36 100644 --- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td @@ -380,9 +380,9 @@ class VOP_MOVREL : VOPProfile<[untyped, i32, untyped, un let OutsDPP = (outs Src0RC32:$vdst); let InsDPP16 = (ins Src0RC32:$old, Src0RC32:$src0, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, - DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, DppFI:$fi); + DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi); let AsmDPP16 = getAsmDPP16<1, 1, 0>.ret; - let InsDPP8 = (ins Src0RC32:$old, Src0RC32:$src0, dpp8:$dpp8, DppFI:$fi); + let InsDPP8 = (ins Src0RC32:$old, Src0RC32:$src0, dpp8:$dpp8, Dpp8FI:$fi); let AsmDPP8 = getAsmDPP8<1, 1, 0>.ret; let OutsVOP3DPP = (outs Src0RC64:$vdst); diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td index 
4437d5f2a03338..9f54e69f6d55e1 100644 --- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td @@ -430,7 +430,7 @@ class VOP_MAC : VOPProfile <[vt0, vt1, vt1, v getVregSrcForVT.ret:$src2, // stub argument dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl); - let InsDPP16 = !con(InsDPP, (ins DppFI:$fi)); + let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi)); let InsVOP3Base = getInsVOP3Base, 3, 0, HasModifiers, HasModifiers, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP, Src2Mod, HasOpSel>.ret; @@ -447,7 +447,7 @@ class VOP_MAC : VOPProfile <[vt0, vt1, vt1, v let InsDPP8 = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0, Src1ModDPP:$src1_modifiers, Src1DPP:$src1, getVregSrcForVT.ret:$src2, // stub argument - dpp8:$dpp8, DppFI:$fi); + dpp8:$dpp8, Dpp8FI:$fi); let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0, Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1, getVregSrcForVT.ret:$src2, // stub argument @@ -500,7 +500,7 @@ def VOP_MAC_F16_t16 : VOP_MAC { let InsDPP8 = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0, Src1ModDPP:$src1_modifiers, Src1DPP:$src1, getVregSrcForVT.ret:$src2, // stub argument - dpp8:$dpp8, DppFI:$fi); + dpp8:$dpp8, Dpp8FI:$fi); let Src2Mod = FP32InputMods; // dummy unused modifiers let Src2RC64 = VGPRSrc_32; // stub argument } @@ -552,11 +552,11 @@ def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped], /*EnableClamp=*/ Src1DPP:$src1, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl); - let InsDPP16 = !con(InsDPP, (ins DppFI:$fi)); + let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi)); let InsDPP8 = (ins DstRCDPP:$old, Src0DPP:$src0, Src1DPP:$src1, - dpp8:$dpp8, DppFI:$fi); + dpp8:$dpp8, Dpp8FI:$fi); let Outs32 = (outs DstRC:$vdst); let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); let OutsVOP3DPP = Outs64; @@ -594,11 +594,11 @@ def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1], /*EnableClamp=*/1> Src1DPP:$src1, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl); - let InsDPP16 = !con(InsDPP, (ins DppFI:$fi)); + let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi)); let InsDPP8 = (ins DstRCDPP:$old, Src0DPP:$src0, Src1DPP:$src1, - dpp8:$dpp8, DppFI:$fi); + dpp8:$dpp8, Dpp8FI:$fi); let HasExt = 1; let HasExtDPP = 1; @@ -645,11 +645,11 @@ class VOP2e_SGPR ArgVT> : VOPProfile { FPVRegInputMods:$src1_modifiers, Src1DPP:$src1, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl); - let InsDPP16 = !con(InsDPP, (ins DppFI:$fi)); + let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi)); let InsDPP8 = (ins DstRCDPP:$old, FPVRegInputMods:$src0_modifiers, Src0DPP:$src0, FPVRegInputMods:$src1_modifiers, Src1DPP:$src1, - dpp8:$dpp8, DppFI:$fi); + dpp8:$dpp8, Dpp8FI:$fi); let Src0ModVOP3DPP = FPVRegInputMods; let Src1ModVOP3DPP = FPVRegInputMods; diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td index 396ae9c9d92eea..7198a4022dae87 100644 --- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -532,11 +532,11 @@ def VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile { FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1, VGPR_32:$vdst_in, op_sel0:$op_sel, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, - DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, DppFI:$fi); + DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi); let InsVOP3DPP8 = (ins VGPR_32:$old, FP32InputMods:$src0_modifiers, 
Src0VOP3DPP:$src0, FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1, - VGPR_32:$vdst_in, op_sel0:$op_sel, dpp8:$dpp8, DppFI:$fi); + VGPR_32:$vdst_in, op_sel0:$op_sel, dpp8:$dpp8, Dpp8FI:$fi); let HasClamp = 0; let HasExtVOP3DPP = 1; @@ -553,12 +553,12 @@ def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile, FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1, FP32InputMods:$src2_modifiers, VGPR_32:$src2, op_sel0:$op_sel, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, - DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, DppFI:$fi); + DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi); let InsVOP3DPP8 = (ins VGPR_32:$old, FP32InputMods:$src0_modifiers, Src0VOP3DPP:$src0, FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1, FP32InputMods:$src2_modifiers, VGPR_32:$src2, - op_sel0:$op_sel, dpp8:$dpp8, DppFI:$fi); + op_sel0:$op_sel, dpp8:$dpp8, Dpp8FI:$fi); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 1; diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td index 74f451b6d4f7fe..a0090f3e8d1db0 100644 --- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td @@ -461,13 +461,13 @@ def VOP3P_DOTF8_Profile : VOP3P_Profile, let InsVOP3DPP8 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1, PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2, - neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp8:$dpp8, DppFI:$fi); + neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp8:$dpp8, Dpp8FI:$fi); let InsVOP3DPP16 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1, PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2, neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, - DppBoundCtrl:$bound_ctrl, DppFI:$fi); + DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi); } multiclass VOP3PDOTF8Inst { diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td index fe52a0e39e4f1b..508f06c4739a50 100644 --- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td @@ -766,7 +766,7 @@ class VOPC_Class_Profile sched, ValueType src0VT, ValueType let AsmDPP = "$src0_modifiers, $src1 $dpp_ctrl$row_mask$bank_mask$bound_ctrl"; let AsmDPP16 = AsmDPP#"$fi"; let InsDPP = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0, Src1DPP:$src1, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl); - let InsDPP16 = !con(InsDPP, (ins DppFI:$fi)); + let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi)); // DPP8 forbids modifiers and can inherit from VOPC_Profile let Ins64 = (ins Src0Mod:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1); From 6193233540e55de61baeb80208b06c6808b14dbc Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 22 Feb 2024 13:01:37 +0300 Subject: [PATCH 12/19] [AArch64] Fix sched model for TSV110 core. (#82343) Accumulator operand of MADD instruction can be bypassed from another MUL-like operation. Before this fix bypassing was incorrectly applied to multiplier operand. 
Co-authored-by: Yury Gribov --- llvm/lib/Target/AArch64/AArch64SchedTSV110.td | 6 +- .../AArch64/HiSilicon/tsv110-forwarding.s | 83 +++++++++++++++++++ 2 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s diff --git a/llvm/lib/Target/AArch64/AArch64SchedTSV110.td b/llvm/lib/Target/AArch64/AArch64SchedTSV110.td index 0ae9a69fd48265..1c577a25bf7390 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedTSV110.td +++ b/llvm/lib/Target/AArch64/AArch64SchedTSV110.td @@ -419,10 +419,10 @@ def : InstRW<[TSV110Wr_12cyc_1MDU], (instregex "^(S|U)DIVWr$")>; def : InstRW<[TSV110Wr_20cyc_1MDU], (instregex "^(S|U)DIVXr$")>; def TSV110ReadMAW : SchedReadAdvance<2, [TSV110Wr_3cyc_1MDU]>; -def : InstRW<[TSV110Wr_3cyc_1MDU, TSV110ReadMAW], (instrs MADDWrrr, MSUBWrrr)>; +def : InstRW<[TSV110Wr_3cyc_1MDU, ReadIM, ReadIM, TSV110ReadMAW], (instrs MADDWrrr, MSUBWrrr)>; def TSV110ReadMAQ : SchedReadAdvance<3, [TSV110Wr_4cyc_1MDU]>; -def : InstRW<[TSV110Wr_4cyc_1MDU, TSV110ReadMAQ], (instrs MADDXrrr, MSUBXrrr)>; -def : InstRW<[TSV110Wr_3cyc_1MDU, TSV110ReadMAW], (instregex "(S|U)(MADDL|MSUBL)rrr")>; +def : InstRW<[TSV110Wr_4cyc_1MDU, ReadIM, ReadIM, TSV110ReadMAQ], (instrs MADDXrrr, MSUBXrrr)>; +def : InstRW<[TSV110Wr_3cyc_1MDU, ReadIM, ReadIM, TSV110ReadMAW], (instregex "(S|U)(MADDL|MSUBL)rrr")>; def : InstRW<[TSV110Wr_4cyc_1MDU], (instregex "^(S|U)MULHrr$")>; diff --git a/llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s b/llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s new file mode 100644 index 00000000000000..207822b618396e --- /dev/null +++ b/llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s @@ -0,0 +1,83 @@ +# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py +# RUN: llvm-mca -mtriple=aarch64 -mcpu=tsv110 --instruction-info=0 --resource-pressure=0 --timeline --iterations=1 < %s | FileCheck %s + +# LLVM-MCA-BEGIN madd nobypass +mul x0, x1, x2 +add x0, x0, x1 +add x0, x0, x1 +add x0, x0, x1 +# LLVM-MCA-END + +# LLVM-MCA-BEGIN madd bypass +mul x0, x1, x2 +madd x0, x1, x2, x0 +madd x0, x1, x2, x0 +madd x0, x0, x0, x0 +# LLVM-MCA-END + +# CHECK: [0] Code Region - madd nobypass + +# CHECK: Iterations: 1 +# CHECK-NEXT: Instructions: 4 +# CHECK-NEXT: Total Cycles: 10 +# CHECK-NEXT: Total uOps: 4 + +# CHECK: Dispatch Width: 4 +# CHECK-NEXT: uOps Per Cycle: 0.40 +# CHECK-NEXT: IPC: 0.40 +# CHECK-NEXT: Block RThroughput: 1.0 + +# CHECK: Timeline view: +# CHECK-NEXT: Index 0123456789 + +# CHECK: [0,0] DeeeeER . mul x0, x1, x2 +# CHECK-NEXT: [0,1] D====eER . add x0, x0, x1 +# CHECK-NEXT: [0,2] D=====eER. add x0, x0, x1 +# CHECK-NEXT: [0,3] D======eER add x0, x0, x1 + +# CHECK: Average Wait times (based on the timeline view): +# CHECK-NEXT: [0]: Executions +# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue +# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready +# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage + +# CHECK: [0] [1] [2] [3] +# CHECK-NEXT: 0. 1 1.0 1.0 0.0 mul x0, x1, x2 +# CHECK-NEXT: 1. 1 5.0 0.0 0.0 add x0, x0, x1 +# CHECK-NEXT: 2. 1 6.0 0.0 0.0 add x0, x0, x1 +# CHECK-NEXT: 3. 
1 7.0 0.0 0.0 add x0, x0, x1 +# CHECK-NEXT: 1 4.8 0.3 0.0 + +# CHECK: [1] Code Region - madd bypass + +# CHECK: Iterations: 1 +# CHECK-NEXT: Instructions: 4 +# CHECK-NEXT: Total Cycles: 13 +# CHECK-NEXT: Total uOps: 4 + +# CHECK: Dispatch Width: 4 +# CHECK-NEXT: uOps Per Cycle: 0.31 +# CHECK-NEXT: IPC: 0.31 +# CHECK-NEXT: Block RThroughput: 4.0 + +# CHECK: Timeline view: +# CHECK-NEXT: 012 +# CHECK-NEXT: Index 0123456789 + +# CHECK: [0,0] DeeeeER . . mul x0, x1, x2 +# CHECK-NEXT: [0,1] D=eeeeER . . madd x0, x1, x2, x0 +# CHECK-NEXT: [0,2] D==eeeeER . . madd x0, x1, x2, x0 +# CHECK-NEXT: [0,3] D======eeeeER madd x0, x0, x0, x0 + +# CHECK: Average Wait times (based on the timeline view): +# CHECK-NEXT: [0]: Executions +# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue +# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready +# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage + +# CHECK: [0] [1] [2] [3] +# CHECK-NEXT: 0. 1 1.0 1.0 0.0 mul x0, x1, x2 +# CHECK-NEXT: 1. 1 2.0 0.0 0.0 madd x0, x1, x2, x0 +# CHECK-NEXT: 2. 1 3.0 0.0 0.0 madd x0, x1, x2, x0 +# CHECK-NEXT: 3. 1 7.0 0.0 0.0 madd x0, x0, x0, x0 +# CHECK-NEXT: 1 3.3 0.3 0.0 From 4a602d9250e1eb3c729d0421d11be2be8693cbf2 Mon Sep 17 00:00:00 2001 From: Vyacheslav Levytskyy <89994100+VyacheslavLevytskyy@users.noreply.github.com> Date: Thu, 22 Feb 2024 11:05:19 +0100 Subject: [PATCH 13/19] Add support for the SPV_INTEL_usm_storage_classes extension (#82247) Add support for the SPV_INTEL_usm_storage_classes extension: * https://github.com/intel/llvm/blob/sycl/sycl/doc/design/spirv-extensions/SPV_INTEL_usm_storage_classes.asciidoc --- llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp | 17 ++-- llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp | 5 +- llvm/lib/Target/SPIRV/SPIRVInstrInfo.td | 4 + .../Target/SPIRV/SPIRVInstructionSelector.cpp | 36 ++++++-- llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 16 ++-- llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp | 7 ++ llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp | 11 ++- llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp | 6 ++ .../lib/Target/SPIRV/SPIRVSymbolicOperands.td | 3 + llvm/lib/Target/SPIRV/SPIRVUtils.cpp | 19 ++++- llvm/lib/Target/SPIRV/SPIRVUtils.h | 3 +- .../intel-usm-addrspaces.ll | 84 +++++++++++++++++++ 12 files changed, 183 insertions(+), 28 deletions(-) create mode 100644 llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp index cc438b2bb8d4d7..10569ef0468bda 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp @@ -150,7 +150,8 @@ getKernelArgTypeQual(const Function &F, unsigned ArgIdx) { static SPIRVType *getArgSPIRVType(const Function &F, unsigned ArgIdx, SPIRVGlobalRegistry *GR, - MachineIRBuilder &MIRBuilder) { + MachineIRBuilder &MIRBuilder, + const SPIRVSubtarget &ST) { // Read argument's access qualifier from metadata or default. SPIRV::AccessQualifier::AccessQualifier ArgAccessQual = getArgAccessQual(F, ArgIdx); @@ -169,8 +170,8 @@ static SPIRVType *getArgSPIRVType(const Function &F, unsigned ArgIdx, if (MDTypeStr.ends_with("*")) ResArgType = GR->getOrCreateSPIRVTypeByName( MDTypeStr, MIRBuilder, - addressSpaceToStorageClass( - OriginalArgType->getPointerAddressSpace())); + addressSpaceToStorageClass(OriginalArgType->getPointerAddressSpace(), + ST)); else if (MDTypeStr.ends_with("_t")) ResArgType = GR->getOrCreateSPIRVTypeByName( "opencl." 
+ MDTypeStr.str(), MIRBuilder, @@ -206,6 +207,10 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, assert(GR && "Must initialize the SPIRV type registry before lowering args."); GR->setCurrentFunc(MIRBuilder.getMF()); + // Get access to information about available extensions + const SPIRVSubtarget *ST = + static_cast(&MIRBuilder.getMF().getSubtarget()); + // Assign types and names to all args, and store their types for later. FunctionType *FTy = getOriginalFunctionType(F); SmallVector ArgTypeVRegs; @@ -216,7 +221,7 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, // TODO: handle the case of multiple registers. if (VRegs[i].size() > 1) return false; - auto *SpirvTy = getArgSPIRVType(F, i, GR, MIRBuilder); + auto *SpirvTy = getArgSPIRVType(F, i, GR, MIRBuilder, *ST); GR->assignSPIRVTypeToVReg(SpirvTy, VRegs[i][0], MIRBuilder.getMF()); ArgTypeVRegs.push_back(SpirvTy); @@ -318,10 +323,6 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, if (F.hasName()) buildOpName(FuncVReg, F.getName(), MIRBuilder); - // Get access to information about available extensions - const auto *ST = - static_cast(&MIRBuilder.getMF().getSubtarget()); - // Handle entry points and function linkage. if (isEntryPoint(F)) { const auto &STI = MIRBuilder.getMF().getSubtarget(); diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp index 47fec745c3f18a..a1cb630f1aa477 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp @@ -709,7 +709,10 @@ SPIRVType *SPIRVGlobalRegistry::createSPIRVType( // TODO: change the implementation once opaque pointers are supported // in the SPIR-V specification. SpvElementType = getOrCreateSPIRVIntegerType(8, MIRBuilder); - auto SC = addressSpaceToStorageClass(PType->getAddressSpace()); + // Get access to information about available extensions + const SPIRVSubtarget *ST = + static_cast(&MIRBuilder.getMF().getSubtarget()); + auto SC = addressSpaceToStorageClass(PType->getAddressSpace(), *ST); // Null pointer means we have a loop in type definitions, make and // return corresponding OpTypeForwardPointer. 
if (SpvElementType == nullptr) { diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td index 86f65b6320d530..7c5252e8cb372b 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td +++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td @@ -430,6 +430,10 @@ def OpGenericCastToPtrExplicit : Op<123, (outs ID:$r), (ins TYPE:$t, ID:$p, Stor "$r = OpGenericCastToPtrExplicit $t $p $s">; def OpBitcast : UnOp<"OpBitcast", 124>; +// SPV_INTEL_usm_storage_classes +def OpPtrCastToCrossWorkgroupINTEL : UnOp<"OpPtrCastToCrossWorkgroupINTEL", 5934>; +def OpCrossWorkgroupCastToPtrINTEL : UnOp<"OpCrossWorkgroupCastToPtrINTEL", 5938>; + // 3.42.12 Composite Instructions def OpVectorExtractDynamic: Op<77, (outs ID:$res), (ins TYPE:$type, vID:$vec, ID:$idx), diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index 53d19a1e31382d..7258d3b4d88ed3 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -828,8 +828,18 @@ static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) { } } +static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) { + switch (SC) { + case SPIRV::StorageClass::DeviceOnlyINTEL: + case SPIRV::StorageClass::HostOnlyINTEL: + return true; + default: + return false; + } +} + // In SPIR-V address space casting can only happen to and from the Generic -// storage class. We can also only case Workgroup, CrossWorkgroup, or Function +// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function // pointers to and from Generic pointers. As such, we can convert e.g. from // Workgroup to Function by going via a Generic pointer as an intermediary. All // other combinations can only be done by a bitcast, and are probably not safe. @@ -862,13 +872,17 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg, SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr); SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg); - // Casting from an eligable pointer to Generic. + // don't generate a cast between identical storage classes + if (SrcSC == DstSC) + return true; + + // Casting from an eligible pointer to Generic. if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)) return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric); - // Casting from Generic to an eligable pointer. + // Casting from Generic to an eligible pointer. if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC)) return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr); - // Casting between 2 eligable pointers using Generic as an intermediary. + // Casting between 2 eligible pointers using Generic as an intermediary. 
if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) { Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass); SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType( @@ -886,6 +900,16 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg, .addUse(Tmp) .constrainAllUses(TII, TRI, RBI); } + + // Check if instructions from the SPV_INTEL_usm_storage_classes extension may + // be applied + if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup) + return selectUnOp(ResVReg, ResType, I, + SPIRV::OpPtrCastToCrossWorkgroupINTEL); + if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC)) + return selectUnOp(ResVReg, ResType, I, + SPIRV::OpCrossWorkgroupCastToPtrINTEL); + // TODO Should this case just be disallowed completely? // We're casting 2 other arbitrary address spaces, so have to bitcast. return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast); @@ -1545,7 +1569,7 @@ bool SPIRVInstructionSelector::selectGlobalValue( } SPIRVType *ResType = GR.getOrCreateSPIRVPointerType( PointerBaseType, I, TII, - addressSpaceToStorageClass(GV->getAddressSpace())); + addressSpaceToStorageClass(GV->getAddressSpace(), STI)); std::string GlobalIdent; if (!GV->hasName()) { @@ -1618,7 +1642,7 @@ bool SPIRVInstructionSelector::selectGlobalValue( unsigned AddrSpace = GV->getAddressSpace(); SPIRV::StorageClass::StorageClass Storage = - addressSpaceToStorageClass(AddrSpace); + addressSpaceToStorageClass(AddrSpace, STI); bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage && Storage != SPIRV::StorageClass::Function; SPIRV::LinkageType::LinkageType LnkType = diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp index 011a550a7b3d9b..4f2e7a240fc2cc 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp @@ -102,14 +102,16 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) { const LLT p2 = LLT::pointer(2, PSize); // UniformConstant const LLT p3 = LLT::pointer(3, PSize); // Workgroup const LLT p4 = LLT::pointer(4, PSize); // Generic - const LLT p5 = LLT::pointer(5, PSize); // Input + const LLT p5 = + LLT::pointer(5, PSize); // Input, SPV_INTEL_usm_storage_classes (Device) + const LLT p6 = LLT::pointer(6, PSize); // SPV_INTEL_usm_storage_classes (Host) // TODO: remove copy-pasting here by using concatenation in some way. 
auto allPtrsScalarsAndVectors = { - p0, p1, p2, p3, p4, p5, s1, s8, s16, - s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8, - v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, - v8s8, v8s16, v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64}; + p0, p1, p2, p3, p4, p5, p6, s1, s8, s16, + s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8, v3s16, + v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8, v8s16, + v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64}; auto allScalarsAndVectors = { s1, s8, s16, s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64, @@ -133,8 +135,8 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) { auto allFloatAndIntScalars = allIntScalars; - auto allPtrs = {p0, p1, p2, p3, p4, p5}; - auto allWritablePtrs = {p0, p1, p3, p4}; + auto allPtrs = {p0, p1, p2, p3, p4, p5, p6}; + auto allWritablePtrs = {p0, p1, p3, p4, p5, p6}; for (auto Opc : TypeFoldingSupportingOpcs) getActionDefinitionsBuilder(Opc).custom(); diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp index 9b9575b9879948..3be28c97d95381 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp @@ -1063,6 +1063,13 @@ void addInstrRequirements(const MachineInstr &MI, Reqs.addCapability(SPIRV::Capability::ExpectAssumeKHR); } break; + case SPIRV::OpPtrCastToCrossWorkgroupINTEL: + case SPIRV::OpCrossWorkgroupCastToPtrINTEL: + if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) { + Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes); + Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL); + } + break; case SPIRV::OpConstantFunctionPointerINTEL: if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) { Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers); diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp index cbc16fa986614e..144216896eb68c 100644 --- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp @@ -122,6 +122,9 @@ static void foldConstantsIntoIntrinsics(MachineFunction &MF) { static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB) { + // Get access to information about available extensions + const SPIRVSubtarget *ST = + static_cast(&MIB.getMF().getSubtarget()); SmallVector ToErase; for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { @@ -141,7 +144,7 @@ static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR, getMDOperandAsType(MI.getOperand(3).getMetadata(), 0), MIB); SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType( BaseTy, MI, *MF.getSubtarget().getInstrInfo(), - addressSpaceToStorageClass(MI.getOperand(4).getImm())); + addressSpaceToStorageClass(MI.getOperand(4).getImm(), *ST)); // If the bitcast would be redundant, replace all uses with the source // register. 
@@ -250,6 +253,10 @@ Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB) { + // Get access to information about available extensions + const SPIRVSubtarget *ST = + static_cast(&MIB.getMF().getSubtarget()); + MachineRegisterInfo &MRI = MF.getRegInfo(); SmallVector ToErase; @@ -269,7 +276,7 @@ static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR, getMDOperandAsType(MI.getOperand(2).getMetadata(), 0), MIB); SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType( BaseTy, MI, *MF.getSubtarget().getInstrInfo(), - addressSpaceToStorageClass(MI.getOperand(3).getImm())); + addressSpaceToStorageClass(MI.getOperand(3).getImm(), *ST)); MachineInstr *Def = MRI.getVRegDef(Reg); assert(Def && "Expecting an instruction that defines the register"); insertAssignInstr(Reg, nullptr, AssignedPtrType, GR, MIB, diff --git a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp index 4694363614ef60..79f16146ccd944 100644 --- a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp @@ -49,6 +49,12 @@ cl::list Extensions( clEnumValN(SPIRV::Extension::SPV_INTEL_optnone, "SPV_INTEL_optnone", "Adds OptNoneINTEL value for Function Control mask that " "indicates a request to not optimize the function."), + clEnumValN(SPIRV::Extension::SPV_INTEL_usm_storage_classes, + "SPV_INTEL_usm_storage_classes", + "Introduces two new storage classes that are sub classes of " + "the CrossWorkgroup storage class " + "that provides additional information that can enable " + "optimization."), clEnumValN(SPIRV::Extension::SPV_INTEL_subgroups, "SPV_INTEL_subgroups", "Allows work items in a subgroup to share data without the " "use of local memory and work group barriers, and to " diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td index 6c36087baa85ed..b022b97408d7d4 100644 --- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td +++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td @@ -463,6 +463,7 @@ defm AtomicFloat16MinMaxEXT : CapabilityOperand<5616, 0, 0, [SPV_EXT_shader_atom defm AtomicFloat32MinMaxEXT : CapabilityOperand<5612, 0, 0, [SPV_EXT_shader_atomic_float_min_max], []>; defm AtomicFloat64MinMaxEXT : CapabilityOperand<5613, 0, 0, [SPV_EXT_shader_atomic_float_min_max], []>; defm GroupUniformArithmeticKHR : CapabilityOperand<6400, 0, 0, [SPV_KHR_uniform_group_instructions], []>; +defm USMStorageClassesINTEL : CapabilityOperand<5935, 0, 0, [SPV_INTEL_usm_storage_classes], [Kernel]>; //===----------------------------------------------------------------------===// // Multiclass used to define SourceLanguage enum values and at the same time @@ -700,6 +701,8 @@ defm IncomingRayPayloadNV : StorageClassOperand<5342, [RayTracingNV]>; defm ShaderRecordBufferNV : StorageClassOperand<5343, [RayTracingNV]>; defm PhysicalStorageBufferEXT : StorageClassOperand<5349, [PhysicalStorageBufferAddressesEXT]>; defm CodeSectionINTEL : StorageClassOperand<5605, [FunctionPointersINTEL]>; +defm DeviceOnlyINTEL : StorageClassOperand<5936, [USMStorageClassesINTEL]>; +defm HostOnlyINTEL : StorageClassOperand<5937, [USMStorageClassesINTEL]>; //===----------------------------------------------------------------------===// // Multiclass used to define Dim enum values and at the same time diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp index 
05f766d3ec5483..169d7cc93897ed 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp @@ -14,6 +14,7 @@ #include "MCTargetDesc/SPIRVBaseInfo.h" #include "SPIRV.h" #include "SPIRVInstrInfo.h" +#include "SPIRVSubtarget.h" #include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" @@ -146,15 +147,19 @@ unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) { return 3; case SPIRV::StorageClass::Generic: return 4; + case SPIRV::StorageClass::DeviceOnlyINTEL: + return 5; + case SPIRV::StorageClass::HostOnlyINTEL: + return 6; case SPIRV::StorageClass::Input: return 7; default: - llvm_unreachable("Unable to get address space id"); + report_fatal_error("Unable to get address space id"); } } SPIRV::StorageClass::StorageClass -addressSpaceToStorageClass(unsigned AddrSpace) { +addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) { switch (AddrSpace) { case 0: return SPIRV::StorageClass::Function; @@ -166,10 +171,18 @@ addressSpaceToStorageClass(unsigned AddrSpace) { return SPIRV::StorageClass::Workgroup; case 4: return SPIRV::StorageClass::Generic; + case 5: + return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes) + ? SPIRV::StorageClass::DeviceOnlyINTEL + : SPIRV::StorageClass::CrossWorkgroup; + case 6: + return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes) + ? SPIRV::StorageClass::HostOnlyINTEL + : SPIRV::StorageClass::CrossWorkgroup; case 7: return SPIRV::StorageClass::Input; default: - llvm_unreachable("Unknown address space"); + report_fatal_error("Unknown address space"); } } diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h index a33dc02f854f58..1af53dcd0c4cd1 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.h +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h @@ -27,6 +27,7 @@ class MachineRegisterInfo; class Register; class StringRef; class SPIRVInstrInfo; +class SPIRVSubtarget; // Add the given string as a series of integer operand, inserting null // terminators and padding to make sure the operands all have 32-bit @@ -62,7 +63,7 @@ unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC); // Convert an LLVM IR address space to a SPIR-V storage class. 
SPIRV::StorageClass::StorageClass -addressSpaceToStorageClass(unsigned AddrSpace); +addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI); SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC); diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll new file mode 100644 index 00000000000000..30c16350bf2b1f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll @@ -0,0 +1,84 @@ +; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_usm_storage_classes/intel_usm_addrspaces.ll + +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT +; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %} +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-WITHOUT +; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-: Capability USMStorageClassesINTEL +; CHECK-SPIRV-WITHOUT-NO: Capability USMStorageClassesINTEL +; CHECK-SPIRV-EXT-DAG: %[[DevTy:[0-9]+]] = OpTypePointer DeviceOnlyINTEL %[[#]] +; CHECK-SPIRV-EXT-DAG: %[[HostTy:[0-9]+]] = OpTypePointer HostOnlyINTEL %[[#]] +; CHECK-SPIRV-DAG: %[[CrsWrkTy:[0-9]+]] = OpTypePointer CrossWorkgroup %[[#]] + +target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" +target triple = "spir64-unknown-unknown" + +define spir_kernel void @foo_kernel() { +entry: + ret void +} + +; CHECK-SPIRV: %[[Ptr1:[0-9]+]] = OpLoad %[[CrsWrkTy]] %[[#]] +; CHECK-SPIRV-EXT: %[[CastedPtr1:[0-9]+]] = OpCrossWorkgroupCastToPtrINTEL %[[DevTy]] %[[Ptr1]] +; CHECK-SPIRV-WITHOUT-NOT: OpCrossWorkgroupCastToPtrINTEL +; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr1]] +define spir_func void @test1(ptr addrspace(1) %arg_glob, ptr addrspace(5) %arg_dev) { +entry: + %arg_glob.addr = alloca ptr addrspace(1), align 4 + %arg_dev.addr = alloca ptr addrspace(5), align 4 + store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4 + store ptr addrspace(5) %arg_dev, ptr %arg_dev.addr, align 4 + %loaded_glob = load ptr addrspace(1), ptr %arg_glob.addr, align 4 + %casted_ptr = addrspacecast ptr addrspace(1) %loaded_glob to ptr addrspace(5) + store ptr addrspace(5) %casted_ptr, ptr %arg_dev.addr, align 4 + ret void +} + +; CHECK-SPIRV: %[[Ptr2:[0-9]+]] = OpLoad %[[CrsWrkTy]] %[[#]] +; CHECK-SPIRV-EXT: %[[CastedPtr2:[0-9]+]] = OpCrossWorkgroupCastToPtrINTEL %[[HostTy]] %[[Ptr2]] +; CHECK-SPIRV-WITHOUT-NOT: OpCrossWorkgroupCastToPtrINTEL +; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr2]] +define spir_func void @test2(ptr addrspace(1) %arg_glob, ptr addrspace(6) %arg_host) { +entry: + %arg_glob.addr = alloca ptr addrspace(1), align 4 + %arg_host.addr = alloca ptr addrspace(6), align 4 + store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4 + store ptr addrspace(6) %arg_host, ptr %arg_host.addr, align 4 + %loaded_glob = load ptr addrspace(1), ptr %arg_glob.addr, align 4 + %casted_ptr = addrspacecast ptr addrspace(1) %loaded_glob to ptr addrspace(6) + store ptr addrspace(6) %casted_ptr, ptr %arg_host.addr, align 
4 + ret void +} + +; CHECK-SPIRV-EXT: %[[Ptr3:[0-9]+]] = OpLoad %[[DevTy]] %[[#]] +; CHECK-SPIRV-EXT: %[[CastedPtr3:[0-9]+]] = OpPtrCastToCrossWorkgroupINTEL %[[CrsWrkTy]] %[[Ptr3]] +; CHECK-SPIRV-WITHOUT-NOT: OpPtrCastToCrossWorkgroupINTEL +; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr3]] +define spir_func void @test3(ptr addrspace(1) %arg_glob, ptr addrspace(5) %arg_dev) { +entry: + %arg_glob.addr = alloca ptr addrspace(1), align 4 + %arg_dev.addr = alloca ptr addrspace(5), align 4 + store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4 + store ptr addrspace(5) %arg_dev, ptr %arg_dev.addr, align 4 + %loaded_dev = load ptr addrspace(5), ptr %arg_dev.addr, align 4 + %casted_ptr = addrspacecast ptr addrspace(5) %loaded_dev to ptr addrspace(1) + store ptr addrspace(1) %casted_ptr, ptr %arg_glob.addr, align 4 + ret void +} + +; CHECK-SPIRV-EXT: %[[Ptr4:[0-9]+]] = OpLoad %[[HostTy]] %[[#]] +; CHECK-SPIRV-EXT: %[[CastedPtr4:[0-9]+]] = OpPtrCastToCrossWorkgroupINTEL %[[CrsWrkTy]] %[[Ptr4]] +; CHECK-SPIRV-WITHOUT-NOT: OpPtrCastToCrossWorkgroupINTEL +; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr4]] +define spir_func void @test4(ptr addrspace(1) %arg_glob, ptr addrspace(6) %arg_host) { +entry: + %arg_glob.addr = alloca ptr addrspace(1), align 4 + %arg_host.addr = alloca ptr addrspace(6), align 4 + store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4 + store ptr addrspace(6) %arg_host, ptr %arg_host.addr, align 4 + %loaded_host = load ptr addrspace(6), ptr %arg_host.addr, align 4 + %casted_ptr = addrspacecast ptr addrspace(6) %loaded_host to ptr addrspace(1) + store ptr addrspace(1) %casted_ptr, ptr %arg_glob.addr, align 4 + ret void +} From f01719afaae9a208ac272d99760d18e4c16d9241 Mon Sep 17 00:00:00 2001 From: Benjamin Maxwell Date: Thu, 22 Feb 2024 10:21:12 +0000 Subject: [PATCH 14/19] [mlir][test] Add integration tests for vector.interleave (#80969) --- .../CPU/ArmSVE/test-scalable-interleave.mlir | 24 +++++++++++++++++++ .../Dialect/Vector/CPU/test-interleave.mlir | 24 +++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir new file mode 100644 index 00000000000000..8ae3eee6462ca7 --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir @@ -0,0 +1,24 @@ +// RUN: mlir-opt %s -test-lower-to-llvm | \ +// RUN: %mcr_aarch64_cmd -e entry -entry-point-result=void \ +// RUN: -shared-libs=%mlir_c_runner_utils,%mlir_arm_runner_utils | \ +// RUN: FileCheck %s + +func.func @entry() { + %f1 = arith.constant 1.0 : f32 + %f2 = arith.constant 2.0 : f32 + %v1 = vector.splat %f1 : vector<[4]xf32> + %v2 = vector.splat %f2 : vector<[4]xf32> + vector.print %v1 : vector<[4]xf32> + vector.print %v2 : vector<[4]xf32> + // + // Test vectors: + // + // CHECK: ( 1, 1, 1, 1 + // CHECK: ( 2, 2, 2, 2 + + %v3 = vector.interleave %v1, %v2 : vector<[4]xf32> + vector.print %v3 : vector<[8]xf32> + // CHECK: ( 1, 2, 1, 2, 1, 2, 1, 2 + + return +} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir new file mode 100644 index 00000000000000..0bc78af6aba031 --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir @@ -0,0 
+1,24 @@ +// RUN: mlir-opt %s -test-lower-to-llvm | \ +// RUN: mlir-cpu-runner -e entry -entry-point-result=void \ +// RUN: -shared-libs=%mlir_c_runner_utils | \ +// RUN: FileCheck %s + +func.func @entry() { + %f1 = arith.constant 1.0 : f32 + %f2 = arith.constant 2.0 : f32 + %v1 = vector.splat %f1 : vector<2x4xf32> + %v2 = vector.splat %f2 : vector<2x4xf32> + vector.print %v1 : vector<2x4xf32> + vector.print %v2 : vector<2x4xf32> + // + // Test vectors: + // + // CHECK: ( ( 1, 1, 1, 1 ), ( 1, 1, 1, 1 ) ) + // CHECK: ( ( 2, 2, 2, 2 ), ( 2, 2, 2, 2 ) ) + + %v3 = vector.interleave %v1, %v2 : vector<2x4xf32> + vector.print %v3 : vector<2x8xf32> + // CHECK: ( ( 1, 2, 1, 2, 1, 2, 1, 2 ), ( 1, 2, 1, 2, 1, 2, 1, 2 ) ) + + return +} From e4d4ebe0415b9f1fd8cb034ac68f0616f12facf2 Mon Sep 17 00:00:00 2001 From: David Spickett Date: Thu, 22 Feb 2024 10:22:07 +0000 Subject: [PATCH 15/19] [llvm][llvm-jitlink] Disable test on Windows on Arm This fails on one of our bots: https://lab.llvm.org/buildbot/#/builders/120/builds/6309 llvm-jitlink error: Unsupported target machine architecture in COFF object The other bot doesn't run the test at all it seems but I can't explain why. It's also possible that I'm mistaken and the mostly native but still "cross compiling" setup we have on WoA means an x86 object is produced sometimes (perhaps because a default triple is still x86). --- llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test b/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test index 33ad5515a6357a..ec71011d545ebd 100644 --- a/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test +++ b/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test @@ -5,4 +5,7 @@ # # Use -sectcreate to create a section from a data file. +# Jitlink does not support ARM64 COFF files. +# UNSUPPORTED: target=aarch64-pc-windows-{{.*}} + # jitlink-check: *{4}foo = 0x2a2a5a5a \ No newline at end of file From b9ce237980b5a636e87e3578609c812833f7537f Mon Sep 17 00:00:00 2001 From: Jay Foad Date: Thu, 22 Feb 2024 10:39:43 +0000 Subject: [PATCH 16/19] [AMDGPU] Clean up conversion of DPP instructions in AMDGPUDisassembler (#82480) Convert DPP instructions after all calls to tryDecodeInst, just like we do for all other instruction types. NFCI. 
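
For readers skimming the diff, here is a hedged standalone sketch of the new control flow; none of it is the actual AMDGPUDisassembler code, and tryTable, convertIfDPP, DecodedInst and the toy flag values are invented stand-ins for tryDecodeInst, the convert* helpers and the SIInstrFlags bits named in the patch. The point is only the shape: probe the candidate tables first, then run a single post-decode fix-up that dispatches on the decoded instruction's flags.

    #include <cstdint>
    #include <cstdio>

    enum : uint32_t { FlagDPP = 1, FlagVOP3 = 2, FlagVOP3P = 4, FlagVOPC = 8 };

    struct DecodedInst {
      uint32_t Flags = 0;
      bool Valid = false;
    };

    // Stand-in for one tryDecodeInst call against a particular decoder table.
    static DecodedInst tryTable(uint32_t Encoding, uint32_t TableFlags) {
      DecodedInst I;
      I.Valid = (Encoding & TableFlags) == TableFlags; // toy matching rule
      if (I.Valid)
        I.Flags = TableFlags;
      return I;
    }

    // One shared post-decode fix-up, keyed off the decoded instruction's flags.
    static void convertIfDPP(const DecodedInst &I) {
      if (!(I.Flags & FlagDPP))
        return;
      if (I.Flags & FlagVOP3P)
        std::puts("convert VOP3P DPP");
      else if (I.Flags & FlagVOPC)
        std::puts("convert VOPC DPP");
      else if (I.Flags & FlagVOP3)
        std::puts("convert VOP3 DPP");
      else
        std::puts("convert DPP8/DPP16");
    }

    int main() {
      const uint32_t Encoding = FlagDPP | FlagVOP3;
      const uint32_t Tables[] = {FlagVOP3P | FlagDPP, FlagVOP3 | FlagDPP, FlagDPP};
      for (uint32_t Table : Tables) {
        DecodedInst I = tryTable(Encoding, Table);
        if (!I.Valid)
          continue;        // try the next table, as the real decoder does
        convertIfDPP(I);   // conversion happens once, after decode succeeds
        break;
      }
      return 0;
    }

Doing the conversion in one place is also what the later patch in this series leans on when it folds the separate DPP decoder tables into the per-generation ones.
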
--- .../Disassembler/AMDGPUDisassembler.cpp | 127 ++++++++---------- 1 file changed, 53 insertions(+), 74 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index 53abb3e3f9aea8..c5d06dea92c30a 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -465,36 +465,25 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, Res = tryDecodeInst(DecoderTableDPP8GFX1196, DecoderTableDPP8GFX11_FAKE1696, MI, DecW, Address, CS); - if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) + if (Res) break; + Res = tryDecodeInst(DecoderTableDPP8GFX1296, DecoderTableDPP8GFX12_FAKE1696, MI, DecW, Address, CS); - if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) + if (Res) break; - const auto convertVOPDPP = [&]() { - if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P) { - convertVOP3PDPPInst(MI); - } else if (AMDGPU::isVOPC64DPP(MI.getOpcode())) { - convertVOPCDPPInst(MI); // Special VOP3 case - } else { - assert(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3); - convertVOP3DPPInst(MI); // Regular VOP3 case - } - }; Res = tryDecodeInst(DecoderTableDPPGFX1196, DecoderTableDPPGFX11_FAKE1696, MI, DecW, Address, CS); - if (Res) { - convertVOPDPP(); + if (Res) break; - } + Res = tryDecodeInst(DecoderTableDPPGFX1296, DecoderTableDPPGFX12_FAKE1696, MI, DecW, Address, CS); - if (Res) { - convertVOPDPP(); + if (Res) break; - } + Res = tryDecodeInst(DecoderTableGFX1196, MI, DecW, Address, CS); if (Res) break; @@ -515,27 +504,22 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) { Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS); - if (Res) { - if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) - == -1) - break; - if (convertDPP8Inst(MI) == MCDisassembler::Success) - break; - } + if (Res) + break; } Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address, CS); - if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) + if (Res) break; Res = tryDecodeInst(DecoderTableDPP8GFX1164, DecoderTableDPP8GFX11_FAKE1664, MI, QW, Address, CS); - if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) + if (Res) break; Res = tryDecodeInst(DecoderTableDPP8GFX1264, DecoderTableDPP8GFX12_FAKE1664, MI, QW, Address, CS); - if (Res && convertDPP8Inst(MI) == MCDisassembler::Success) + if (Res) break; Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address, CS); @@ -543,19 +527,13 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, Res = tryDecodeInst(DecoderTableDPPGFX1164, DecoderTableDPPGFX11_FAKE1664, MI, QW, Address, CS); - if (Res) { - if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC) - convertVOPCDPPInst(MI); + if (Res) break; - } Res = tryDecodeInst(DecoderTableDPPGFX1264, DecoderTableDPPGFX12_FAKE1664, MI, QW, Address, CS); - if (Res) { - if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC) - convertVOPCDPPInst(MI); + if (Res) break; - } if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem)) { Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS); @@ -652,6 +630,22 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, Address, CS); } while (false); + if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP)) { + if (isMacDPP(MI)) + convertMacDPPInst(MI); + + if (MCII->get(MI.getOpcode()).TSFlags 
& SIInstrFlags::VOP3P) + convertVOP3PDPPInst(MI); + else if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC) || + AMDGPU::isVOPC64DPP(MI.getOpcode())) + convertVOPCDPPInst(MI); // Special VOP3 case + else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) != + -1) + convertDPP8Inst(MI); + else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3) + convertVOP3DPPInst(MI); // Regular VOP3 case + } + if (Res && AMDGPU::isMAC(MI.getOpcode())) { // Insert dummy unused src2_modifiers. insertNamedMCOperand(MI, MCOperand::createImm(0), @@ -926,56 +920,41 @@ void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const { AMDGPU::OpName::src2_modifiers); } -// We must check FI == literal to reject not genuine dpp8 insts, and we must -// first add optional MI operands to check FI DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const { unsigned Opc = MI.getOpcode(); - if (MCII->get(Opc).TSFlags & SIInstrFlags::VOP3P) { - convertVOP3PDPPInst(MI); - } else if ((MCII->get(Opc).TSFlags & SIInstrFlags::VOPC) || - AMDGPU::isVOPC64DPP(Opc)) { - convertVOPCDPPInst(MI); - } else { - if (isMacDPP(MI)) - convertMacDPPInst(MI); + int VDstInIdx = + AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in); + if (VDstInIdx != -1) + insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in); - int VDstInIdx = - AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in); - if (VDstInIdx != -1) - insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in); + if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp8_gfx12 || + MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp8_gfx12) + insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::src2); - if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp8_gfx12 || - MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp8_gfx12) - insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::src2); + unsigned DescNumOps = MCII->get(Opc).getNumOperands(); + if (MI.getNumOperands() < DescNumOps && + AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) { + convertTrue16OpSel(MI); + auto Mods = collectVOPModifiers(MI); + insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel), + AMDGPU::OpName::op_sel); + } else { + // Insert dummy unused src modifiers. + if (MI.getNumOperands() < DescNumOps && + AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers)) + insertNamedMCOperand(MI, MCOperand::createImm(0), + AMDGPU::OpName::src0_modifiers); - unsigned DescNumOps = MCII->get(Opc).getNumOperands(); if (MI.getNumOperands() < DescNumOps && - AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) { - convertTrue16OpSel(MI); - auto Mods = collectVOPModifiers(MI); - insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel), - AMDGPU::OpName::op_sel); - } else { - // Insert dummy unused src modifiers. 
- if (MI.getNumOperands() < DescNumOps && - AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers)) - insertNamedMCOperand(MI, MCOperand::createImm(0), - AMDGPU::OpName::src0_modifiers); - - if (MI.getNumOperands() < DescNumOps && - AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers)) - insertNamedMCOperand(MI, MCOperand::createImm(0), - AMDGPU::OpName::src1_modifiers); - } + AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers)) + insertNamedMCOperand(MI, MCOperand::createImm(0), + AMDGPU::OpName::src1_modifiers); } return MCDisassembler::Success; } DecodeStatus AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const { - if (isMacDPP(MI)) - convertMacDPPInst(MI); - convertTrue16OpSel(MI); int VDstInIdx = From 4f12f47550eee85447c9ec37d27a20c6593d3d40 Mon Sep 17 00:00:00 2001 From: Harald van Dijk Date: Thu, 22 Feb 2024 10:45:27 +0000 Subject: [PATCH 17/19] [AArch64] Switch to soft promoting half types. (#80576) The traditional promotion is known to generate wrong code. Like #80440 for ARM, except that far less is affected as on AArch64, hardware floating point support always includes FP16 support and is unaffected by these changes. This only affects `-mgeneral-regs-only` (Clang) / `-mattr=-fp-armv8` (LLVM). Because this only affects a configuration where no FP support is available at all, `useFPRegsForHalfType()` has no effect and is not specified: `f32` was getting legalized as a parameter and return type to an integer anyway. --- llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 + .../AArch64/strictfp_f16_abi_promote.ll | 140 +++--------------- 2 files changed, 26 insertions(+), 116 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index 436b21fd134632..bec13484450d78 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -1308,6 +1308,8 @@ class AArch64TargetLowering : public TargetLowering { bool preferScalarizeSplat(SDNode *N) const override; unsigned getMinimumJumpTableEntries() const override; + + bool softPromoteHalfType() const override { return true; } }; namespace AArch64 { diff --git a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll index 37186cf22ccc71..a34f7abcc22a3f 100644 --- a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll +++ b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll @@ -70,22 +70,20 @@ define void @v3f16_arg(<3 x half> %arg, ptr %ptr) #0 { ; NOFP16-NEXT: .cfi_offset w22, -32 ; NOFP16-NEXT: .cfi_offset w30, -48 ; NOFP16-NEXT: mov w21, w0 -; NOFP16-NEXT: and w0, w2, #0xffff +; NOFP16-NEXT: and w0, w1, #0xffff ; NOFP16-NEXT: mov x19, x3 -; NOFP16-NEXT: mov w20, w1 +; NOFP16-NEXT: mov w20, w2 ; NOFP16-NEXT: bl __gnu_h2f_ieee ; NOFP16-NEXT: mov w22, w0 ; NOFP16-NEXT: and w0, w21, #0xffff ; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w21, w0 +; NOFP16-NEXT: mov w8, w0 ; NOFP16-NEXT: and w0, w20, #0xffff +; NOFP16-NEXT: orr x21, x8, x22, lsl #32 ; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w8, w21 -; NOFP16-NEXT: // kill: def $w0 killed $w0 def $x0 -; NOFP16-NEXT: str w22, [x19, #8] -; NOFP16-NEXT: orr x8, x8, x0, lsl #32 +; NOFP16-NEXT: str x21, [x19] ; NOFP16-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload -; NOFP16-NEXT: str x8, [x19] +; NOFP16-NEXT: str w0, [x19, #8] ; NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload ; NOFP16-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload ; NOFP16-NEXT: ret @@ -182,46 
+180,17 @@ define void @v4f16_arg(<4 x half> %arg, ptr %ptr) #0 { define void @outgoing_v4f16_return(ptr %ptr) #0 { ; NOFP16-LABEL: outgoing_v4f16_return: ; NOFP16: // %bb.0: -; NOFP16-NEXT: stp x30, x23, [sp, #-48]! // 16-byte Folded Spill -; NOFP16-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill -; NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; NOFP16-NEXT: .cfi_def_cfa_offset 48 +; NOFP16-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill +; NOFP16-NEXT: .cfi_def_cfa_offset 16 ; NOFP16-NEXT: .cfi_offset w19, -8 -; NOFP16-NEXT: .cfi_offset w20, -16 -; NOFP16-NEXT: .cfi_offset w21, -24 -; NOFP16-NEXT: .cfi_offset w22, -32 -; NOFP16-NEXT: .cfi_offset w23, -40 -; NOFP16-NEXT: .cfi_offset w30, -48 +; NOFP16-NEXT: .cfi_offset w30, -16 ; NOFP16-NEXT: mov x19, x0 ; NOFP16-NEXT: bl v4f16_result -; NOFP16-NEXT: and w0, w0, #0xffff -; NOFP16-NEXT: mov w20, w1 -; NOFP16-NEXT: mov w21, w2 -; NOFP16-NEXT: mov w22, w3 -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w23, w0 -; NOFP16-NEXT: and w0, w20, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w20, w0 -; NOFP16-NEXT: and w0, w21, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w21, w0 -; NOFP16-NEXT: and w0, w22, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #6] -; NOFP16-NEXT: mov w0, w21 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #4] -; NOFP16-NEXT: mov w0, w20 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #2] -; NOFP16-NEXT: mov w0, w23 -; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: strh w2, [x19, #4] +; NOFP16-NEXT: strh w3, [x19, #6] +; NOFP16-NEXT: strh w1, [x19, #2] ; NOFP16-NEXT: strh w0, [x19] -; NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload -; NOFP16-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload -; NOFP16-NEXT: ldp x30, x23, [sp], #48 // 16-byte Folded Reload +; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload ; NOFP16-NEXT: ret %val = call <4 x half> @v4f16_result() store <4 x half> %val, ptr %ptr @@ -231,82 +200,21 @@ define void @outgoing_v4f16_return(ptr %ptr) #0 { define void @outgoing_v8f16_return(ptr %ptr) #0 { ; NOFP16-LABEL: outgoing_v8f16_return: ; NOFP16: // %bb.0: -; NOFP16-NEXT: stp x30, x27, [sp, #-80]! // 16-byte Folded Spill -; NOFP16-NEXT: stp x26, x25, [sp, #16] // 16-byte Folded Spill -; NOFP16-NEXT: stp x24, x23, [sp, #32] // 16-byte Folded Spill -; NOFP16-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill -; NOFP16-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; NOFP16-NEXT: .cfi_def_cfa_offset 80 +; NOFP16-NEXT: stp x30, x19, [sp, #-16]! 
// 16-byte Folded Spill +; NOFP16-NEXT: .cfi_def_cfa_offset 16 ; NOFP16-NEXT: .cfi_offset w19, -8 -; NOFP16-NEXT: .cfi_offset w20, -16 -; NOFP16-NEXT: .cfi_offset w21, -24 -; NOFP16-NEXT: .cfi_offset w22, -32 -; NOFP16-NEXT: .cfi_offset w23, -40 -; NOFP16-NEXT: .cfi_offset w24, -48 -; NOFP16-NEXT: .cfi_offset w25, -56 -; NOFP16-NEXT: .cfi_offset w26, -64 -; NOFP16-NEXT: .cfi_offset w27, -72 -; NOFP16-NEXT: .cfi_offset w30, -80 +; NOFP16-NEXT: .cfi_offset w30, -16 ; NOFP16-NEXT: mov x19, x0 ; NOFP16-NEXT: bl v8f16_result -; NOFP16-NEXT: and w0, w0, #0xffff -; NOFP16-NEXT: mov w21, w1 -; NOFP16-NEXT: mov w22, w2 -; NOFP16-NEXT: mov w23, w3 -; NOFP16-NEXT: mov w24, w4 -; NOFP16-NEXT: mov w25, w5 -; NOFP16-NEXT: mov w26, w6 -; NOFP16-NEXT: mov w27, w7 -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w20, w0 -; NOFP16-NEXT: and w0, w21, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w21, w0 -; NOFP16-NEXT: and w0, w22, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w22, w0 -; NOFP16-NEXT: and w0, w23, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w23, w0 -; NOFP16-NEXT: and w0, w24, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w24, w0 -; NOFP16-NEXT: and w0, w25, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w25, w0 -; NOFP16-NEXT: and w0, w26, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: mov w26, w0 -; NOFP16-NEXT: and w0, w27, #0xffff -; NOFP16-NEXT: bl __gnu_h2f_ieee -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #14] -; NOFP16-NEXT: mov w0, w26 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #12] -; NOFP16-NEXT: mov w0, w25 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #10] -; NOFP16-NEXT: mov w0, w24 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #8] -; NOFP16-NEXT: mov w0, w23 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #6] -; NOFP16-NEXT: mov w0, w22 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #4] -; NOFP16-NEXT: mov w0, w21 -; NOFP16-NEXT: bl __gnu_f2h_ieee -; NOFP16-NEXT: strh w0, [x19, #2] -; NOFP16-NEXT: mov w0, w20 -; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: strh w5, [x19, #10] +; NOFP16-NEXT: strh w7, [x19, #14] +; NOFP16-NEXT: strh w6, [x19, #12] +; NOFP16-NEXT: strh w4, [x19, #8] +; NOFP16-NEXT: strh w3, [x19, #6] +; NOFP16-NEXT: strh w2, [x19, #4] +; NOFP16-NEXT: strh w1, [x19, #2] ; NOFP16-NEXT: strh w0, [x19] -; NOFP16-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; NOFP16-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload -; NOFP16-NEXT: ldp x24, x23, [sp, #32] // 16-byte Folded Reload -; NOFP16-NEXT: ldp x26, x25, [sp, #16] // 16-byte Folded Reload -; NOFP16-NEXT: ldp x30, x27, [sp], #80 // 16-byte Folded Reload +; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload ; NOFP16-NEXT: ret %val = call <8 x half> @v8f16_result() store <8 x half> %val, ptr %ptr From 3b7d43301e3662da4197cef7948c18fab850d9c4 Mon Sep 17 00:00:00 2001 From: Jay Foad Date: Thu, 22 Feb 2024 11:18:18 +0000 Subject: [PATCH 18/19] [AMDGPU] Remove DPP DecoderNamespaces. NFC. (#82491) Now that there is no special checking for valid DPP encodings, these instructions can use the same DecoderNamespace as other 64- or 96-bit instructions. Also clean up setting DecoderNamespace: in most cases it should be set as a pair with AssemblerPredicate. 
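
A small, purely illustrative aside in plain C++ (not TableGen and not the real TableGen emitter; the tableName helper is hypothetical, only the resulting names come from this series): the generated disassembler tables are looked up by a name built from the DecoderNamespace plus the encoding width, which is why folding the DPP namespaces into the per-generation ones shrinks the set of tables getInstruction() has to probe.

    #include <cstdio>
    #include <string>

    // Hypothetical helper mirroring how generated decoder tables are named:
    // "DecoderTable" + DecoderNamespace + encoding width in bits.
    static std::string tableName(const std::string &Namespace, unsigned Bits) {
      return "DecoderTable" + Namespace + std::to_string(Bits);
    }

    int main() {
      // Before this change, 96-bit DPP encodings needed their own tables ...
      std::printf("%s\n", tableName("DPP8GFX11", 96).c_str()); // DecoderTableDPP8GFX1196
      std::printf("%s\n", tableName("DPPGFX11", 96).c_str());  // DecoderTableDPPGFX1196
      // ... while after it they share the plain per-generation table.
      std::printf("%s\n", tableName("GFX11", 96).c_str());     // DecoderTableGFX1196
      return 0;
    }

Keeping DecoderNamespace in the same let block as AssemblerPredicate, as the TableGen changes below do, makes it harder for the two to drift apart when an instruction is moved between generations.
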
--- .../Disassembler/AMDGPUDisassembler.cpp | 57 +- llvm/lib/Target/AMDGPU/VOP1Instructions.td | 75 ++- llvm/lib/Target/AMDGPU/VOP2Instructions.td | 36 +- llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 6 +- llvm/lib/Target/AMDGPU/VOPCInstructions.td | 498 ++++++++---------- llvm/lib/Target/AMDGPU/VOPInstructions.td | 16 +- 6 files changed, 288 insertions(+), 400 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index c5d06dea92c30a..70e2275c58745e 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -462,33 +462,13 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, // encodings if (isGFX11Plus() && Bytes.size() >= 12 ) { DecoderUInt128 DecW = eat12Bytes(Bytes); - Res = - tryDecodeInst(DecoderTableDPP8GFX1196, DecoderTableDPP8GFX11_FAKE1696, - MI, DecW, Address, CS); + Res = tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI, + DecW, Address, CS); if (Res) break; - Res = - tryDecodeInst(DecoderTableDPP8GFX1296, DecoderTableDPP8GFX12_FAKE1696, - MI, DecW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableDPPGFX1196, DecoderTableDPPGFX11_FAKE1696, - MI, DecW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableDPPGFX1296, DecoderTableDPPGFX12_FAKE1696, - MI, DecW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableGFX1196, MI, DecW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableGFX1296, MI, DecW, Address, CS); + Res = tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI, + DecW, Address, CS); if (Res) break; @@ -508,33 +488,6 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, break; } - Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableDPP8GFX1164, - DecoderTableDPP8GFX11_FAKE1664, MI, QW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableDPP8GFX1264, - DecoderTableDPP8GFX12_FAKE1664, MI, QW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address, CS); - if (Res) break; - - Res = tryDecodeInst(DecoderTableDPPGFX1164, DecoderTableDPPGFX11_FAKE1664, - MI, QW, Address, CS); - if (Res) - break; - - Res = tryDecodeInst(DecoderTableDPPGFX1264, DecoderTableDPPGFX12_FAKE1664, - MI, QW, Address, CS); - if (Res) - break; - if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem)) { Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS); if (Res) @@ -593,7 +546,7 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, break; } - // Reinitialize Bytes as DPP64 could have eaten too much + // Reinitialize Bytes Bytes = Bytes_.slice(0, MaxInstBytesNum); // Try decode 32-bit instruction diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td index 576ad32a70cf36..f5424cf48d7a53 100644 --- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td @@ -749,7 +749,7 @@ class VOP1_DPP16 op, VOP1_DPP_Pseudo ps, int subtarget, VOPProfile p = p class VOP1_DPP16_Gen op, VOP1_DPP_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pfl> : VOP1_DPP16 { let AssemblerPredicate = Gen.AssemblerPredicate; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; } class VOP1_DPP8 op, VOP1_Pseudo ps, VOPProfile p = 
ps.Pfl> : @@ -770,7 +770,7 @@ class VOP1_DPP8 op, VOP1_Pseudo ps, VOPProfile p = ps.Pfl> : class VOP1_DPP8_Gen op, VOP1_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pfl> : VOP1_DPP8 { let AssemblerPredicate = Gen.AssemblerPredicate; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; } //===----------------------------------------------------------------------===// @@ -816,7 +816,7 @@ multiclass VOP1_Real_dpp_with_name op, string opName, string asmName> { defvar ps = !cast(opName#"_e32"); let AsmString = asmName # ps.Pfl.AsmDPP16, - DecoderNamespace = "DPP" # Gen.DecoderNamespace # + DecoderNamespace = Gen.DecoderNamespace # !if(ps.Pfl.IsRealTrue16, "", "_FAKE16") in { defm NAME : VOP1_Real_dpp; } @@ -831,7 +831,7 @@ multiclass VOP1_Real_dpp8_with_name op, string opName, string asmName> { defvar ps = !cast(opName#"_e32"); let AsmString = asmName # ps.Pfl.AsmDPP8, - DecoderNamespace = "DPP8" # Gen.DecoderNamespace # + DecoderNamespace = Gen.DecoderNamespace # !if(ps.Pfl.IsRealTrue16, "", "_FAKE16") in { defm NAME : VOP1_Real_dpp8; } @@ -994,9 +994,7 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in { } multiclass VOP1_Real_dpp8_gfx10 op> { if !cast(NAME#"_e32").Pfl.HasExt32BitDPP then - def _dpp8_gfx10 : VOP1_DPP8(NAME#"_e32")> { - let DecoderNamespace = "DPP8"; - } + def _dpp8_gfx10 : VOP1_DPP8(NAME#"_e32")>; } } // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" @@ -1192,16 +1190,14 @@ class VOP1_DPPe op, VOP1_DPP_Pseudo ps, VOPProfile P = ps.Pfl> : let Inst{31-25} = 0x3f; //encoding } -multiclass VOP1Only_Real_vi op> { - let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in { +let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in { + multiclass VOP1Only_Real_vi op> { def _vi : VOP1_Real(NAME), SIEncodingFamily.VI>, VOP1e(NAME).Pfl>; } -} -multiclass VOP1_Real_e32e64_vi op> { - let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in { + multiclass VOP1_Real_e32e64_vi op> { def _e32_vi : VOP1_Real(NAME#"_e32"), SIEncodingFamily.VI>, VOP1e(NAME#"_e32").Pfl>; @@ -1389,44 +1385,41 @@ def : GCNPat < // GFX9 //===----------------------------------------------------------------------===// -multiclass VOP1_Real_gfx9 op> { - let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in { +let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in { + multiclass VOP1_Real_gfx9 op> { defm NAME : VOP1_Real_e32e64_vi ; - } - - if !cast(NAME#"_e32").Pfl.HasExtSDWA9 then - def _sdwa_gfx9 : - VOP_SDWA9_Real (NAME#"_sdwa")>, - VOP1_SDWA9Ae (NAME#"_sdwa").Pfl>; - - if !cast(NAME#"_e32").Pfl.HasExtDPP then - def _dpp_gfx9 : - VOP_DPP_Real(NAME#"_dpp"), SIEncodingFamily.GFX9>, - VOP1_DPPe(NAME#"_dpp")>; - -} -multiclass VOP1_Real_NoDstSel_SDWA_gfx9 op> { - let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in { - defm NAME : VOP1_Real_e32e64_vi ; + if !cast(NAME#"_e32").Pfl.HasExtSDWA9 then + def _sdwa_gfx9 : + VOP_SDWA9_Real (NAME#"_sdwa")>, + VOP1_SDWA9Ae (NAME#"_sdwa").Pfl>; + + if !cast(NAME#"_e32").Pfl.HasExtDPP then + def _dpp_gfx9 : + VOP_DPP_Real(NAME#"_dpp"), SIEncodingFamily.GFX9>, + VOP1_DPPe(NAME#"_dpp")>; } - if !cast(NAME#"_e32").Pfl.HasExtSDWA9 then - def _sdwa_gfx9 : - VOP_SDWA9_Real (NAME#"_sdwa")>, - VOP1_SDWA9Ae (NAME#"_sdwa").Pfl> { - let Inst{42-40} = 6; - } + multiclass VOP1_Real_NoDstSel_SDWA_gfx9 op> { + defm NAME : VOP1_Real_e32e64_vi ; - if !cast(NAME#"_e32").Pfl.HasExtDPP then - def _dpp_gfx9 : - VOP_DPP_Real(NAME#"_dpp"), 
SIEncodingFamily.GFX9>, - VOP1_DPPe(NAME#"_dpp")>; + if !cast(NAME#"_e32").Pfl.HasExtSDWA9 then + def _sdwa_gfx9 : + VOP_SDWA9_Real (NAME#"_sdwa")>, + VOP1_SDWA9Ae (NAME#"_sdwa").Pfl> { + let Inst{42-40} = 6; + } + + if !cast(NAME#"_e32").Pfl.HasExtDPP then + def _dpp_gfx9 : + VOP_DPP_Real(NAME#"_dpp"), SIEncodingFamily.GFX9>, + VOP1_DPPe(NAME#"_dpp")>; + } } defm V_SCREEN_PARTITION_4SE_B32 : VOP1_Real_gfx9 <0x37>; -let AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX9" in +let AssemblerPredicate = isGFX940Plus in defm V_MOV_B64 : VOP1_Real_gfx9 <0x38>; let OtherPredicates = [HasFP8ConversionInsts] in { diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td index 9f54e69f6d55e1..13fe79b4759608 100644 --- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td @@ -1273,7 +1273,7 @@ class VOP2_DPP16_Gen op, VOP2_DPP_Pseudo ps, GFXGen Gen, VOP2_DPP16 { let AssemblerPredicate = Gen.AssemblerPredicate; let OtherPredicates = !if(ps.Pfl.IsRealTrue16, [UseRealTrue16Insts], []); - let DecoderNamespace = "DPP"#Gen.DecoderNamespace# + let DecoderNamespace = Gen.DecoderNamespace# !if(ps.Pfl.IsRealTrue16, "", "_FAKE16"); } @@ -1302,7 +1302,7 @@ class VOP2_DPP8_Gen op, VOP2_Pseudo ps, GFXGen Gen, VOP2_DPP8 { let AssemblerPredicate = Gen.AssemblerPredicate; let OtherPredicates = !if(ps.Pfl.IsRealTrue16, [UseRealTrue16Insts], []); - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace# + let DecoderNamespace = Gen.DecoderNamespace# !if(ps.Pfl.IsRealTrue16, "", "_FAKE16"); } @@ -1748,9 +1748,7 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in { } multiclass VOP2_Real_dpp8_gfx10 op> { if !cast(NAME#"_e32").Pfl.HasExt32BitDPP then - def _dpp8_gfx10 : VOP2_DPP8(NAME#"_e32")> { - let DecoderNamespace = "DPP8"; - } + def _dpp8_gfx10 : VOP2_DPP8(NAME#"_e32")>; } //===------------------------- VOP2 (with name) -------------------------===// @@ -1797,7 +1795,6 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in { def _dpp8_gfx10 : VOP2_DPP8(opName#"_e32")> { VOP2_Pseudo ps = !cast(opName#"_e32"); let AsmString = asmName # ps.Pfl.AsmDPP8; - let DecoderNamespace = "DPP8"; } } @@ -1876,7 +1873,6 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in { VOP2_DPP8(opName#"_e32")> { string AsmDPP8 = !cast(opName#"_e32").Pfl.AsmDPP8; let AsmString = asmName # !subst(", vcc", "", AsmDPP8); - let DecoderNamespace = "DPP8"; } if !cast(opName#"_e32").Pfl.HasExt32BitDPP then def _dpp8_w32_gfx10 : @@ -2231,7 +2227,7 @@ multiclass VOP2_SDWA9_Real op> { VOP2_SDWA9Ae (NAME#"_sdwa").Pfl>; } -let AssemblerPredicate = isGFX8Only in { +let AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" in { multiclass VOP2be_Real_e32e64_vi_only op, string OpName, string AsmName> { def _e32_vi : @@ -2239,14 +2235,12 @@ multiclass VOP2be_Real_e32e64_vi_only op, string OpName, string AsmName VOP2e(OpName#"_e32").Pfl> { VOP2_Pseudo ps = !cast(OpName#"_e32"); let AsmString = AsmName # ps.AsmOperands; - let DecoderNamespace = "GFX8"; } def _e64_vi : VOP3_Real(OpName#"_e64"), SIEncodingFamily.VI>, VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast(OpName#"_e64").Pfl> { VOP3_Pseudo ps = !cast(OpName#"_e64"); let AsmString = AsmName # ps.AsmOperands; - let DecoderNamespace = "GFX8"; } if !cast(OpName#"_e32").Pfl.HasExtSDWA then def _sdwa_vi : @@ -2263,9 +2257,10 @@ multiclass VOP2be_Real_e32e64_vi_only op, string OpName, string AsmName let AsmString = AsmName # ps.AsmOperands; } } -} -let AssemblerPredicate = 
isGFX9Only in { +} // End AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" + +let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in { multiclass VOP2be_Real_e32e64_gfx9 op, string OpName, string AsmName> { def _e32_gfx9 : @@ -2273,14 +2268,12 @@ multiclass VOP2be_Real_e32e64_gfx9 op, string OpName, string AsmName> { VOP2e(OpName#"_e32").Pfl> { VOP2_Pseudo ps = !cast(OpName#"_e32"); let AsmString = AsmName # ps.AsmOperands; - let DecoderNamespace = "GFX9"; } def _e64_gfx9 : VOP3_Real(OpName#"_e64"), SIEncodingFamily.GFX9>, VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast(OpName#"_e64").Pfl> { VOP3_Pseudo ps = !cast(OpName#"_e64"); let AsmString = AsmName # ps.AsmOperands; - let DecoderNamespace = "GFX9"; } if !cast(OpName#"_e32").Pfl.HasExtSDWA9 then def _sdwa_gfx9 : @@ -2295,21 +2288,16 @@ multiclass VOP2be_Real_e32e64_gfx9 op, string OpName, string AsmName> { VOP2_DPPe(OpName#"_dpp")> { VOP2_DPP_Pseudo ps = !cast(OpName#"_dpp"); let AsmString = AsmName # ps.AsmOperands; - let DecoderNamespace = "GFX9"; } } multiclass VOP2_Real_e32e64_gfx9 op> { def _e32_gfx9 : VOP2_Real(NAME#"_e32"), SIEncodingFamily.GFX9>, - VOP2e(NAME#"_e32").Pfl>{ - let DecoderNamespace = "GFX9"; - } + VOP2e(NAME#"_e32").Pfl>; def _e64_gfx9 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX9>, - VOP3e_vi <{0, 1, 0, 0, op{5-0}}, !cast(NAME#"_e64").Pfl> { - let DecoderNamespace = "GFX9"; - } + VOP3e_vi <{0, 1, 0, 0, op{5-0}}, !cast(NAME#"_e64").Pfl>; if !cast(NAME#"_e32").Pfl.HasExtSDWA9 then def _sdwa_gfx9 : VOP_SDWA9_Real (NAME#"_sdwa")>, @@ -2318,12 +2306,10 @@ multiclass VOP2_Real_e32e64_gfx9 op> { if !cast(NAME#"_e32").Pfl.HasExtDPP then def _dpp_gfx9 : VOP_DPP_Real(NAME#"_dpp"), SIEncodingFamily.GFX9>, - VOP2_DPPe(NAME#"_dpp")> { - let DecoderNamespace = "GFX9"; - } + VOP2_DPPe(NAME#"_dpp")>; } -} // AssemblerPredicate = isGFX9Only +} // End AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" multiclass VOP2_Real_e32e64_vi op> : Base_VOP2_Real_e32e64_vi, VOP2_SDWA_Real, VOP2_SDWA9_Real { diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td index a0090f3e8d1db0..cf76de40aef412 100644 --- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td @@ -1486,7 +1486,7 @@ multiclass VOP3P_Real_dpp op, string backing_ps_name = NAME, : VOP3P_DPP16(backing_ps_name #"_dpp"), Gen.Subtarget> { let AsmString = asmName #ps.Pfl.AsmVOP3DPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; let AssemblerPredicate = Gen.AssemblerPredicate; } } @@ -1496,7 +1496,7 @@ multiclass VOP3P_Real_dpp8 op, string backing_ps_name = NAME defvar ps = !cast(backing_ps_name); def _dpp8#Gen.Suffix : VOP3P_DPP8_Base { let AsmString = asmName #ps.Pfl.AsmVOP3DPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; let AssemblerPredicate = Gen.AssemblerPredicate; } } @@ -1613,7 +1613,7 @@ multiclass VOP3P_Real_MFMA_gfx940_aliases op, string Name = !cast(NAME#"_e64").Mnemonic, VOP3_Pseudo PS_ACD = !cast(NAME # "_e64"), VOP3_Pseudo PS_VCD = !cast(NAME # "_vgprcd" # "_e64")> { - let SubtargetPredicate = isGFX940Plus, + let AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940", AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in { def _gfx940_acd : VOP3P_Real, diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td index 508f06c4739a50..e5e82447d55fbd 100644 --- 
a/llvm/lib/Target/AMDGPU/VOPCInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td @@ -222,6 +222,8 @@ class VOPCInstAlias { @@ -1331,196 +1333,176 @@ class VOPC64_DPP8_NoDst op, VOP_Pseudo ps, string opName = ps.OpName> //===----------------------------------------------------------------------===// multiclass VOPC_Real_Base op> { - let AssemblerPredicate = Gen.AssemblerPredicate in { + let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in { defvar ps32 = !cast(NAME#"_e32"); defvar ps64 = !cast(NAME#"_e64"); - let DecoderNamespace = Gen.DecoderNamespace in { - def _e32#Gen.Suffix : VOPC_Real, - VOPCe; - def _e64#Gen.Suffix : VOP3_Real, - VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> { - // Encoding used for VOPC instructions encoded as VOP3 differs from - // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. - bits<8> sdst; - let Inst{7-0} = sdst; - } - } // End DecoderNamespace = Gen.DecoderNamespace + def _e32#Gen.Suffix : VOPC_Real, + VOPCe; + def _e64#Gen.Suffix : VOP3_Real, + VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> { + // Encoding used for VOPC instructions encoded as VOP3 differs from + // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. + bits<8> sdst; + let Inst{7-0} = sdst; + } defm : VOPCInstAliases; if ps32.Pfl.HasExtDPP then { defvar psDPP = !cast(NAME #"_e32" #"_dpp"); defvar AsmDPP = ps32.Pfl.AsmDPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC; - def _e32_dpp_w32#Gen.Suffix : VOPC_DPP16 { - let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e32_dpp_w64#Gen.Suffix : VOPC_DPP16 { - let AsmString = psDPP.OpName # " vcc, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC; + def _e32_dpp_w32#Gen.Suffix : VOPC_DPP16 { + let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e32_dpp_w64#Gen.Suffix : VOPC_DPP16 { + let AsmString = psDPP.OpName # " vcc, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } defvar AsmDPP8 = ps32.Pfl.AsmDPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e32_dpp8#Gen.Suffix : VOPC_DPP8; - def _e32_dpp8_w32#Gen.Suffix : VOPC_DPP8 { - let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e32_dpp8_w64#Gen.Suffix : VOPC_DPP8 { - let AsmString = ps32.OpName # " vcc, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e32_dpp8#Gen.Suffix : VOPC_DPP8; + def _e32_dpp8_w32#Gen.Suffix : VOPC_DPP8 { + let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e32_dpp8_w64#Gen.Suffix : VOPC_DPP8 { + let AsmString = ps32.OpName # " vcc, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } } if ps64.Pfl.HasExtVOP3DPP then { defvar psDPP = !cast(NAME #"_e64" #"_dpp"); defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP>, - SIMCInstr; - def _e64_dpp_w32#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> { - let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e64_dpp_w64#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> { 
- let AsmString = psDPP.OpName # " vcc, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP>, + SIMCInstr; + def _e64_dpp_w32#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> { + let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e64_dpp_w64#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> { + let AsmString = psDPP.OpName # " vcc, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64>; - def _e64_dpp8_w32#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> { - let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e64_dpp8_w64#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> { - let AsmString = ps32.OpName # " vcc, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64>; + def _e64_dpp8_w32#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> { + let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e64_dpp8_w64#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> { + let AsmString = ps32.OpName # " vcc, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } } - } // AssemblerPredicate = Gen.AssemblerPredicate + } // AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace } multiclass VOPC_Real_with_name op, string OpName, string asm_name, string pseudo_mnemonic = ""> { - let AssemblerPredicate = Gen.AssemblerPredicate in { + let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in { defvar ps32 = !cast(OpName#"_e32"); defvar ps64 = !cast(OpName#"_e64"); - let DecoderNamespace = Gen.DecoderNamespace in { - def _e32#Gen.Suffix : - // 32 and 64 bit forms of the instruction have _e32 and _e64 - // respectively appended to their assembly mnemonic. - // _e64 is printed as part of the VOPDstS64orS32 operand, whereas - // the destination-less 32bit forms add it to the asmString here. - VOPC_Real, - VOPCe, - MnemonicAlias, - Requires<[Gen.AssemblerPredicate]>; - def _e64#Gen.Suffix : - VOP3_Real, - VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl>, - MnemonicAlias, - Requires<[Gen.AssemblerPredicate]> { - // Encoding used for VOPC instructions encoded as VOP3 differs from - // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. - bits<8> sdst; - let Inst{7-0} = sdst; - } - } // End DecoderNamespace = Gen.DecoderNamespace + def _e32#Gen.Suffix : + // 32 and 64 bit forms of the instruction have _e32 and _e64 + // respectively appended to their assembly mnemonic. + // _e64 is printed as part of the VOPDstS64orS32 operand, whereas + // the destination-less 32bit forms add it to the asmString here. + VOPC_Real, + VOPCe, + MnemonicAlias, + Requires<[Gen.AssemblerPredicate]>; + def _e64#Gen.Suffix : + VOP3_Real, + VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl>, + MnemonicAlias, + Requires<[Gen.AssemblerPredicate]> { + // Encoding used for VOPC instructions encoded as VOP3 differs from + // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. 
+ bits<8> sdst; + let Inst{7-0} = sdst; + } defm : VOPCInstAliases; if ps32.Pfl.HasExtDPP then { defvar psDPP = !cast(OpName #"_e32" #"_dpp"); defvar AsmDPP = ps32.Pfl.AsmDPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC; - def _e32_dpp_w32#Gen.Suffix - : VOPC_DPP16 { - let AsmString = asm_name # " vcc_lo, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e32_dpp_w64#Gen.Suffix - : VOPC_DPP16 { - let AsmString = asm_name # " vcc, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC; + def _e32_dpp_w32#Gen.Suffix + : VOPC_DPP16 { + let AsmString = asm_name # " vcc_lo, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e32_dpp_w64#Gen.Suffix + : VOPC_DPP16 { + let AsmString = asm_name # " vcc, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } defvar AsmDPP8 = ps32.Pfl.AsmDPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e32_dpp8#Gen.Suffix : VOPC_DPP8; - def _e32_dpp8_w32#Gen.Suffix - : VOPC_DPP8 { - let AsmString = asm_name # " vcc_lo, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e32_dpp8_w64#Gen.Suffix - : VOPC_DPP8 { - let AsmString = asm_name # " vcc, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e32_dpp8#Gen.Suffix : VOPC_DPP8; + def _e32_dpp8_w32#Gen.Suffix + : VOPC_DPP8 { + let AsmString = asm_name # " vcc_lo, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e32_dpp8_w64#Gen.Suffix + : VOPC_DPP8 { + let AsmString = asm_name # " vcc, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } } if ps64.Pfl.HasExtVOP3DPP then { defvar psDPP = !cast(OpName #"_e64" #"_dpp"); defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name>, - SIMCInstr; - def _e64_dpp_w32#Gen.Suffix - : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> { - let AsmString = asm_name # " vcc_lo, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e64_dpp_w64#Gen.Suffix - : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> { - let AsmString = asm_name # " vcc, " # AsmDPP; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name>, + SIMCInstr; + def _e64_dpp_w32#Gen.Suffix + : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> { + let AsmString = asm_name # " vcc_lo, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e64_dpp_w64#Gen.Suffix + : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> { + let AsmString = asm_name # " vcc, " # AsmDPP; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name>; - def _e64_dpp8_w32#Gen.Suffix - : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> { - let AsmString = asm_name # " vcc_lo, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave32; - } - def _e64_dpp8_w64#Gen.Suffix - : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> { - let AsmString = asm_name # " vcc, " # AsmDPP8; - let isAsmParserOnly = 1; - let WaveSizePredicate = isWave64; - } + def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64, 
asm_name>; + def _e64_dpp8_w32#Gen.Suffix + : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> { + let AsmString = asm_name # " vcc_lo, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave32; + } + def _e64_dpp8_w64#Gen.Suffix + : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> { + let AsmString = asm_name # " vcc, " # AsmDPP8; + let isAsmParserOnly = 1; + let WaveSizePredicate = isWave64; } } - } // AssemblerPredicate = Gen.AssemblerPredicate + } // End AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace } multiclass VOPC_Real_t16 op, string asm_name, @@ -1528,123 +1510,103 @@ multiclass VOPC_Real_t16 op, string asm_name, VOPC_Real_with_name; multiclass VOPCX_Real op> { - let AssemblerPredicate = Gen.AssemblerPredicate in { + let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in { defvar ps32 = !cast(NAME#"_nosdst_e32"); defvar ps64 = !cast(NAME#"_nosdst_e64"); - let DecoderNamespace = Gen.DecoderNamespace in { - def _e32#Gen.Suffix : - VOPC_Real, - VOPCe { - let AsmString = !subst("_nosdst", "", ps32.PseudoInstr) - # " " # ps32.AsmOperands; - } - def _e64#Gen.Suffix : - VOP3_Real, - VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> { - let Inst{7-0} = ?; // sdst - let AsmString = !subst("_nosdst", "", ps64.Mnemonic) - # "{_e64} " # ps64.AsmOperands; - } - } // End DecoderNamespace = Gen.DecoderNamespace + def _e32#Gen.Suffix : + VOPC_Real, + VOPCe { + let AsmString = !subst("_nosdst", "", ps32.PseudoInstr) + # " " # ps32.AsmOperands; + } + def _e64#Gen.Suffix : + VOP3_Real, + VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> { + let Inst{7-0} = ?; // sdst + let AsmString = !subst("_nosdst", "", ps64.Mnemonic) + # "{_e64} " # ps64.AsmOperands; + } defm : VOPCXInstAliases; if ps32.Pfl.HasExtDPP then { defvar psDPP = !cast(NAME #"_nosdst_e32" #"_dpp"); defvar AsmDPP = ps32.Pfl.AsmDPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e32_dpp#Gen.Suffix - : VOPC_DPP16_SIMC { - let AsmString = !subst("_nosdst", "", psDPP.OpName) # " " # AsmDPP; - } + def _e32_dpp#Gen.Suffix + : VOPC_DPP16_SIMC { + let AsmString = !subst("_nosdst", "", psDPP.OpName) # " " # AsmDPP; } defvar AsmDPP8 = ps32.Pfl.AsmDPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e32_dpp8#Gen.Suffix : VOPC_DPP8 { - let AsmString = !subst("_nosdst", "", ps32.OpName) # " " # AsmDPP8; - } + def _e32_dpp8#Gen.Suffix : VOPC_DPP8 { + let AsmString = !subst("_nosdst", "", ps32.OpName) # " " # AsmDPP8; } } if ps64.Pfl.HasExtVOP3DPP then { defvar psDPP = !cast(NAME #"_nosdst_e64" #"_dpp"); defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e64_dpp#Gen.Suffix - : VOPC64_DPP16_NoDst<{0, op}, psDPP>, - SIMCInstr { - let AsmString = !subst("_nosdst", "", psDPP.OpName) - # "{_e64_dpp} " # AsmDPP; - } + def _e64_dpp#Gen.Suffix + : VOPC64_DPP16_NoDst<{0, op}, psDPP>, + SIMCInstr { + let AsmString = !subst("_nosdst", "", psDPP.OpName) + # "{_e64_dpp} " # AsmDPP; } defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64> { - let AsmString = !subst("_nosdst", "", ps64.OpName) - # "{_e64_dpp} " # AsmDPP8; - } + def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64> { + let AsmString = !subst("_nosdst", "", ps64.OpName) + # "{_e64_dpp} " # AsmDPP8; } } - } // AssemblerPredicate = Gen.AssemblerPredicate + } // End AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace } 
multiclass VOPCX_Real_with_name op, string OpName, string asm_name, string pseudo_mnemonic = ""> { - let AssemblerPredicate = Gen.AssemblerPredicate in { + let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in { defvar ps32 = !cast(OpName#"_nosdst_e32"); defvar ps64 = !cast(OpName#"_nosdst_e64"); - let DecoderNamespace = Gen.DecoderNamespace in { - def _e32#Gen.Suffix - : VOPC_Real, - MnemonicAlias, - Requires<[Gen.AssemblerPredicate]>, - VOPCe { - let AsmString = asm_name # "{_e32} " # ps32.AsmOperands; - } - def _e64#Gen.Suffix - : VOP3_Real, - MnemonicAlias, - Requires<[Gen.AssemblerPredicate]>, - VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> { - let Inst{7-0} = ? ; // sdst - let AsmString = asm_name # "{_e64} " # ps64.AsmOperands; - } - } // End DecoderNamespace = Gen.DecoderNamespace + def _e32#Gen.Suffix + : VOPC_Real, + MnemonicAlias, + Requires<[Gen.AssemblerPredicate]>, + VOPCe { + let AsmString = asm_name # "{_e32} " # ps32.AsmOperands; + } + def _e64#Gen.Suffix + : VOP3_Real, + MnemonicAlias, + Requires<[Gen.AssemblerPredicate]>, + VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> { + let Inst{7-0} = ? ; // sdst + let AsmString = asm_name # "{_e64} " # ps64.AsmOperands; + } defm : VOPCXInstAliases; if ps32.Pfl.HasExtDPP then { defvar psDPP = !cast(OpName#"_nosdst_e32"#"_dpp"); - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC; - } - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e32_dpp8#Gen.Suffix : VOPC_DPP8; - } + def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC; + def _e32_dpp8#Gen.Suffix : VOPC_DPP8; } if ps64.Pfl.HasExtVOP3DPP then { defvar psDPP = !cast(OpName#"_nosdst_e64"#"_dpp"); defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16; - let DecoderNamespace = "DPP"#Gen.DecoderNamespace in { - def _e64_dpp#Gen.Suffix - : VOPC64_DPP16_NoDst<{0, op}, psDPP, asm_name>, - SIMCInstr { - let AsmString = asm_name # "{_e64_dpp} " # AsmDPP; - } + def _e64_dpp#Gen.Suffix + : VOPC64_DPP16_NoDst<{0, op}, psDPP, asm_name>, + SIMCInstr { + let AsmString = asm_name # "{_e64_dpp} " # AsmDPP; } defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in { - def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64, asm_name> { - let AsmString = asm_name # "{_e64_dpp} " # AsmDPP8; - } + def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64, asm_name> { + let AsmString = asm_name # "{_e64_dpp} " # AsmDPP8; } } - } // AssemblerPredicate = Gen.AssemblerPredicate + } // End AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace } multiclass VOPCX_Real_t16 op, string asm_name, @@ -1873,21 +1835,19 @@ defm V_CMPX_CLASS_F64 : VOPCX_Real_gfx11_gfx12<0x0ff>; // GFX10. //===----------------------------------------------------------------------===// -let AssemblerPredicate = isGFX10Only in { +let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in { multiclass VOPC_Real_gfx10 op> { - let DecoderNamespace = "GFX10" in { - def _e32_gfx10 : - VOPC_Real(NAME#"_e32"), SIEncodingFamily.GFX10>, - VOPCe; - def _e64_gfx10 : - VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX10>, - VOP3a_gfx10<{0, op}, !cast(NAME#"_e64").Pfl> { - // Encoding used for VOPC instructions encoded as VOP3 differs from - // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. 
- bits<8> sdst; - let Inst{7-0} = sdst; - } - } // End DecoderNamespace = "GFX10" + def _e32_gfx10 : + VOPC_Real(NAME#"_e32"), SIEncodingFamily.GFX10>, + VOPCe; + def _e64_gfx10 : + VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX10>, + VOP3a_gfx10<{0, op}, !cast(NAME#"_e64").Pfl> { + // Encoding used for VOPC instructions encoded as VOP3 differs from + // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. + bits<8> sdst; + let Inst{7-0} = sdst; + } if !cast(NAME#"_e32").Pfl.HasExtSDWA9 then def _sdwa_gfx10 : @@ -1898,22 +1858,20 @@ let AssemblerPredicate = isGFX10Only in { } multiclass VOPCX_Real_gfx10 op> { - let DecoderNamespace = "GFX10" in { - def _e32_gfx10 : - VOPC_Real(NAME#"_nosdst_e32"), SIEncodingFamily.GFX10>, - VOPCe { - let AsmString = !subst("_nosdst", "", !cast(NAME#"_nosdst_e32").PseudoInstr) - # " " # !cast(NAME#"_nosdst_e32").AsmOperands; - } - - def _e64_gfx10 : - VOP3_Real(NAME#"_nosdst_e64"), SIEncodingFamily.GFX10>, - VOP3a_gfx10<{0, op}, !cast(NAME#"_nosdst_e64").Pfl> { - let Inst{7-0} = ?; // sdst - let AsmString = !subst("_nosdst", "", !cast(NAME#"_nosdst_e64").Mnemonic) - # "{_e64} " # !cast(NAME#"_nosdst_e64").AsmOperands; - } - } // End DecoderNamespace = "GFX10" + def _e32_gfx10 : + VOPC_Real(NAME#"_nosdst_e32"), SIEncodingFamily.GFX10>, + VOPCe { + let AsmString = !subst("_nosdst", "", !cast(NAME#"_nosdst_e32").PseudoInstr) + # " " # !cast(NAME#"_nosdst_e32").AsmOperands; + } + + def _e64_gfx10 : + VOP3_Real(NAME#"_nosdst_e64"), SIEncodingFamily.GFX10>, + VOP3a_gfx10<{0, op}, !cast(NAME#"_nosdst_e64").Pfl> { + let Inst{7-0} = ?; // sdst + let AsmString = !subst("_nosdst", "", !cast(NAME#"_nosdst_e64").Mnemonic) + # "{_e64} " # !cast(NAME#"_nosdst_e64").AsmOperands; + } if !cast(NAME#"_nosdst_e32").Pfl.HasExtSDWA9 then def _sdwa_gfx10 : @@ -1925,7 +1883,7 @@ let AssemblerPredicate = isGFX10Only in { defm : VOPCXInstAliases; } -} // End AssemblerPredicate = isGFX10Only +} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" defm V_CMP_LT_I16 : VOPC_Real_gfx10<0x089>; defm V_CMP_EQ_I16 : VOPC_Real_gfx10<0x08a>; @@ -1990,25 +1948,23 @@ defm V_CMPX_TRU_F16 : VOPCX_Real_gfx10<0x0ff>; // GFX6, GFX7, GFX10. //===----------------------------------------------------------------------===// -let AssemblerPredicate = isGFX6GFX7 in { +let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in { multiclass VOPC_Real_gfx6_gfx7 op> { - let DecoderNamespace = "GFX6GFX7" in { - def _e32_gfx6_gfx7 : - VOPC_Real(NAME#"_e32"), SIEncodingFamily.SI>, - VOPCe; - def _e64_gfx6_gfx7 : - VOP3_Real(NAME#"_e64"), SIEncodingFamily.SI>, - VOP3a_gfx6_gfx7(NAME#"_e64").Pfl> { - // Encoding used for VOPC instructions encoded as VOP3 differs from - // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. - bits<8> sdst; - let Inst{7-0} = sdst; - } - } // End DecoderNamespace = "GFX6GFX7" + def _e32_gfx6_gfx7 : + VOPC_Real(NAME#"_e32"), SIEncodingFamily.SI>, + VOPCe; + def _e64_gfx6_gfx7 : + VOP3_Real(NAME#"_e64"), SIEncodingFamily.SI>, + VOP3a_gfx6_gfx7(NAME#"_e64").Pfl> { + // Encoding used for VOPC instructions encoded as VOP3 differs from + // VOP3e by destination name (sdst) as VOPC doesn't have vector dst. 
+ bits<8> sdst; + let Inst{7-0} = sdst; + } defm : VOPCInstAliases; } -} // End AssemblerPredicate = isGFX6GFX7 +} // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" multiclass VOPC_Real_gfx6_gfx7_gfx10 op> : VOPC_Real_gfx6_gfx7, VOPC_Real_gfx10; diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td index 801afabbdb1401..2989d05e968ef9 100644 --- a/llvm/lib/Target/AMDGPU/VOPInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td @@ -835,7 +835,7 @@ class VOP_DPP_Pseudo pattern=[], AMDGPUAsmVariants.Disable); let Constraints = !if(P.NumSrcArgs, P.TieRegDPP # " = $vdst", ""); let DisableEncoding = !if(P.NumSrcArgs, P.TieRegDPP, ""); - let DecoderNamespace = "DPP"; + let DecoderNamespace = "GFX8"; VOPProfile Pfl = P; } @@ -906,7 +906,7 @@ class VOP_DPP_Base op, VOP_DPP_Pseudo ps, GFXGen Gen, VOP3_DPP16 { let AssemblerPredicate = Gen.AssemblerPredicate; let True16Predicate = !if(ps.Pfl.IsRealTrue16, UseRealTrue16Insts, NoTrue16Predicate); - let DecoderNamespace = "DPP"#Gen.DecoderNamespace# + let DecoderNamespace = Gen.DecoderNamespace# !if(ps.Pfl.IsRealTrue16, "", "_FAKE16"); } @@ -1463,7 +1463,7 @@ multiclass VOP3_Real_dpp_with_name op, string opName, multiclass VOP3_Real_dpp8_Base op, string opName = NAME> { defvar ps = !cast(opName#"_e64"); def _e64_dpp8#Gen.Suffix : Base_VOP3_DPP8 { - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; let AssemblerPredicate = Gen.AssemblerPredicate; } } @@ -1473,7 +1473,7 @@ multiclass VOP3Dot_Real_dpp8_Base op, string opName = NAME> def _e64_dpp8#Gen.Suffix : Base_VOP3_DPP8 { let Inst{11} = ?; let Inst{12} = ?; - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; let AssemblerPredicate = Gen.AssemblerPredicate; } } @@ -1482,7 +1482,7 @@ multiclass VOP3_Real_dpp8_with_name op, string opName, string asmName> { defvar ps = !cast(opName#"_e64"); let AsmString = asmName # ps.Pfl.AsmVOP3DPP8, - DecoderNamespace = "DPP8"#Gen.DecoderNamespace# + DecoderNamespace = Gen.DecoderNamespace# !if(ps.Pfl.IsRealTrue16, "", "_FAKE16"), True16Predicate = !if(ps.Pfl.IsRealTrue16, UseRealTrue16Insts, NoTrue16Predicate) in { @@ -1505,7 +1505,7 @@ multiclass VOP3be_Real_dpp op, string opName, defvar dpp_ps = !cast(opName #"_e64" #"_dpp"); def _e64_dpp#Gen.Suffix : Base_VOP3b_DPP16, SIMCInstr { - let DecoderNamespace = "DPP"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; let AssemblerPredicate = Gen.AssemblerPredicate; } } @@ -1514,7 +1514,7 @@ multiclass VOP3be_Real_dpp8 op, string opName, string asmName> { defvar ps = !cast(opName #"_e64"); def _e64_dpp8#Gen.Suffix : VOP3b_DPP8_Base { - let DecoderNamespace = "DPP8"#Gen.DecoderNamespace; + let DecoderNamespace = Gen.DecoderNamespace; let AssemblerPredicate = Gen.AssemblerPredicate; } } From f17e4151423a798c18533080fe7f8a3e922d7312 Mon Sep 17 00:00:00 2001 From: Billy Laws Date: Thu, 22 Feb 2024 11:36:18 +0000 Subject: [PATCH 19/19] [AArch64] Mangle names of all ARM64EC functions with entry thunks (#80996) This better matches MSVC output in cases where static functions have their addresses taken. 
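For illustration only (this sketch is not part of the patch; the @callback and
@fptr names are invented), the case this changes looks like:

  @fptr = global ptr @callback   ; address of a local-linkage function escapes

  define internal void @callback(i64) nounwind {
    ret void
  }

With this change the definition label for @callback is emitted under the
mangled name "#callback" and an entry thunk ($ientry_thunk$cdecl$v$i8) is
generated, matching what MSVC does for a static function whose address is
taken. A local-linkage function that is only called directly keeps its
unmangled name and gets no thunk.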
---
 llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp    | 2 +-
 llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp             | 3 ++-
 .../CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll | 6 ++++--
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index c62582ac01a4cf..a99856dcc9439d 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -712,7 +712,7 @@ bool AArch64Arm64ECCallLowering::processFunction(
   // name (emitting the definition) can grab it from the metadata.
   //
   // FIXME: Handle functions with weak linkage?
-  if (F.hasExternalLinkage() || F.hasWeakLinkage() || F.hasLinkOnceLinkage()) {
+  if (!F.hasLocalLinkage() || F.hasAddressTaken()) {
     if (std::optional<std::string> MangledName =
             getArm64ECMangledFunctionName(F.getName().str())) {
       F.setMetadata("arm64ec_unmangled_name",
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 5b5ffd7b2feb06..4fa719ad67cf33 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -1121,7 +1121,8 @@ void AArch64AsmPrinter::emitFunctionEntryLabel() {
     TS->emitDirectiveVariantPCS(CurrentFnSym);
   }

-  if (TM.getTargetTriple().isWindowsArm64EC()) {
+  if (TM.getTargetTriple().isWindowsArm64EC() &&
+      !MF->getFunction().hasLocalLinkage()) {
     // For ARM64EC targets, a function definition's name is mangled differently
     // from the normal symbol. We emit the alias from the unmangled symbol to
     // mangled symbol name here.
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll
index 00ae34bf4b00f2..217f08be052180 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll
@@ -2,7 +2,8 @@

 ; Validates when local linkage functions get a thunk generated.

-; Being called does not cause a thunk to be generated.
+; Being called does not cause a thunk to be generated or the symbol name to be mangled.
+; CHECK-NOT: "#does_not_have_addr_taken":
 ; CHECK-NOT: $ientry_thunk$cdecl$v$f;
 define internal void @does_not_have_addr_taken(float) nounwind {
   ret void
@@ -12,7 +13,8 @@ define void @calls_does_not_have_addr_taken() nounwind {
   ret void
 }

-; Having an address taken does cause a thunk to be generated.
+; Having an address taken does cause a thunk to be generated and the symbol name to be mangled.
+; CHECK: "#has_addr_taken":
 ; CHECK: $ientry_thunk$cdecl$v$i8;
 define internal void @has_addr_taken(i64) nounwind {
   ret void