diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 065abde62af8a..ff9376e635af9 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -1457,6 +1457,23 @@ let SubtargetPredicate = HasAshrPkInsts, isReMaterializable = 1 in {
   defm V_ASHR_PK_U8_I32 : VOP3Inst<"v_ashr_pk_u8_i32", VOP3_Profile, int_amdgcn_ashr_pk_u8_i32>;
 } // End SubtargetPredicate = HasAshrPkInsts, isReMaterializable = 1
 
+class AshrPkI8Pat<Instruction inst, int lo, int hi>: GCNPat<
+  (i16 (or (i16 (shl (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src1, i32:$src2)), (i32 lo), (i32 hi))))), (i16 8))),
+           (i16 (and (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src0, i32:$src2)), (i32 lo), (i32 hi))))), (i16 255))))),
+  (inst 0, VSrc_b32:$src0, 0, VSrc_b32:$src1, 0, VSrc_b32:$src2, 0 )
+>;
+
+class AshrPkU8Pat<Instruction inst, int lo, int hi>: GCNPat<
+  (i16 (or (i16 (shl (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src1, i32:$src2)), (i32 lo), (i32 hi))))), (i16 8))),
+           (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src0, i32:$src2)), (i32 lo), (i32 hi))))))),
+  (inst 0, VSrc_b32:$src0, 0, VSrc_b32:$src1, 0, VSrc_b32:$src2, 0 )
+>;
+
+let SubtargetPredicate = HasAshrPkInsts in {
+  def : AshrPkI8Pat<V_ASHR_PK_I8_I32_e64, -128, 127>;
+  def : AshrPkU8Pat<V_ASHR_PK_U8_I32_e64, 0, 255>;
+}
+
 //===----------------------------------------------------------------------===//
 // Integer Clamp Patterns
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
new file mode 100644
index 0000000000000..b5f43f9f68936
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX950 %s
+define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+; GFX950-LABEL: v_ashr_pk_i8_i32:
+; GFX950:       ; %bb.0:
+; GFX950-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
+; GFX950-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX950-NEXT:    v_mov_b32_e32 v1, 0xffffff80
+; GFX950-NEXT:    v_mov_b32_e32 v2, 0x7f
+; GFX950-NEXT:    v_mov_b32_e32 v0, 0
+; GFX950-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX950-NEXT:    s_ashr_i32 s1, s1, s2
+; GFX950-NEXT:    s_ashr_i32 s0, s0, s2
+; GFX950-NEXT:    v_med3_i32 v3, s0, v1, v2
+; GFX950-NEXT:    v_med3_i32 v1, s1, v1, v2
+; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX950-NEXT:    v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX950-NEXT:    global_store_short v0, v1, s[6:7]
+; GFX950-NEXT:    s_endpgm
+  %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
+  %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
+  %src2.clamp = and i32 %src2, 31
+  %insert.1 = insertelement <2 x i32> poison, i32 %src2.clamp, i64 0
+  %src2.broadcast = shufflevector <2 x i32> %insert.1, <2 x i32> poison, <2 x i32> zeroinitializer
+  %ashr = ashr <2 x i32> %build_vector, %src2.broadcast
+  %sat.low = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %ashr, <2 x i32> <i32 -128, i32 -128>)
+  %sat.hi = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %sat.low, <2 x i32> <i32 127, i32 127>)
+  %trunc = trunc nsw <2 x i32> %sat.hi to <2 x i8>
+  %ret = bitcast <2 x i8> %trunc to i16
+  store i16 %ret, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+; GFX950-LABEL: v_ashr_pk_u8_i32:
+; GFX950:       ; %bb.0:
+; GFX950-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
+; GFX950-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX950-NEXT:    v_mov_b32_e32 v1, 0xff
+; GFX950-NEXT:    v_mov_b32_e32 v0, 0
+; GFX950-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX950-NEXT:    s_ashr_i32 s1, s1, s2
+; GFX950-NEXT:    s_ashr_i32 s0, s0, s2
+; GFX950-NEXT:    v_med3_i32 v2, s0, 0, v1
+; GFX950-NEXT:    v_med3_i32 v1, s1, 0, v1
+; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX950-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX950-NEXT:    global_store_short v0, v1, s[6:7]
+; GFX950-NEXT:    s_endpgm
+  %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
+  %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
+  %src2.clamp = and i32 %src2, 31
+  %insert.1 = insertelement <2 x i32> poison, i32 %src2.clamp, i64 0
+  %src2.broadcast = shufflevector <2 x i32> %insert.1, <2 x i32> poison, <2 x i32> zeroinitializer
+  %ashr = ashr <2 x i32> %build_vector, %src2.broadcast
+  %sat.low = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %ashr, <2 x i32> <i32 0, i32 0>)
+  %sat.hi = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %sat.low, <2 x i32> <i32 255, i32 255>)
+  %trunc = trunc nsw <2 x i32> %sat.hi to <2 x i8>
+  %ret = bitcast <2 x i8> %trunc to i16
+  store i16 %ret, ptr addrspace(1) %out
+  ret void
+}