Skip to content

[LoongArch] Add codegen support for ILP32D calling convention #141539

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -319,3 +319,19 @@ def : Pat<(bitconvert FPR64:$src), (MOVFR2GR_D FPR64:$src)>;
let Predicates = [HasBasicD, IsLA64] in {
def : PatFpr<frint, FRINT_D, FPR64>;
} // Predicates = [HasBasicD, IsLA64]

/// Pseudo-instructions needed when f64 values are held in a pair of GPRs on
/// LA32 with the D extension (e.g. for the soft-float ABI or the ILP32D
/// calling convention).

let Predicates = [HasBasicD, IsLA32] in {
// Moves two GPRs to an FPR.
// usesCustomInserter = 1: not emitted directly; expanded after instruction
// selection by the target's EmitInstrWithCustomInserter hook.
let usesCustomInserter = 1 in
def BuildPairF64Pseudo
    : Pseudo<(outs FPR64:$dst), (ins GPR:$src1, GPR:$src2),
             [(set FPR64:$dst, (loongarch_build_pair_f64 GPR:$src1, GPR:$src2))]>;

// Moves an FPR to two GPRs.
// usesCustomInserter = 1: likewise expanded by the custom inserter hook.
let usesCustomInserter = 1 in
def SplitPairF64Pseudo
    : Pseudo<(outs GPR:$dst1, GPR:$dst2), (ins FPR64:$src),
             [(set GPR:$dst1, GPR:$dst2, (loongarch_split_pair_f64 FPR64:$src))]>;
} // Predicates = [HasBasicD, IsLA32]
309 changes: 268 additions & 41 deletions llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions llvm/lib/Target/LoongArch/LoongArchISelLowering.h
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@ enum NodeType : unsigned {

FTINT,

// Build and split F64 pair
BUILD_PAIR_F64,
SPLIT_PAIR_F64,

// Bit counting operations
CLZ_W,
CTZ_W,
Expand Down
12 changes: 12 additions & 0 deletions llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,13 @@ def SDT_LoongArchMovgr2fcsr : SDTypeProfile<0, 2, [SDTCisVT<0, GRLenVT>,
def SDT_LoongArchMovfcsr2gr : SDTypeProfile<1, 1, [SDTCisVT<0, GRLenVT>,
SDTCisSameAs<0, 1>]>;

// Type profile for BUILD_PAIR_F64: one f64 result produced from two i32
// operands (the two GPR halves of the double); both operands share type i32.
def SDT_LoongArchBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
                                                     SDTCisVT<1, i32>,
                                                     SDTCisSameAs<1, 2>]>;
// Type profile for SPLIT_PAIR_F64: two i32 results (the two GPR halves)
// produced from one f64 operand.
def SDT_LoongArchSplitPairF64 : SDTypeProfile<2, 1, [SDTCisVT<0, i32>,
                                                     SDTCisVT<1, i32>,
                                                     SDTCisVT<2, f64>]>;

// TODO: Add LoongArch specific DAG Nodes
// Target-independent nodes, but with target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
Expand Down Expand Up @@ -165,6 +172,11 @@ def loongarch_iocsrwr_d : SDNode<"LoongArchISD::IOCSRWR_D",
def loongarch_cpucfg : SDNode<"LoongArchISD::CPUCFG", SDTUnaryOp,
[SDNPHasChain]>;

// DAG nodes for moving an f64 value to/from a pair of 32-bit GPRs; matched
// by the BuildPairF64Pseudo / SplitPairF64Pseudo patterns in
// LoongArchFloat64InstrInfo.td.
def loongarch_build_pair_f64 : SDNode<"LoongArchISD::BUILD_PAIR_F64",
                                      SDT_LoongArchBuildPairF64>;
def loongarch_split_pair_f64 : SDNode<"LoongArchISD::SPLIT_PAIR_F64",
                                      SDT_LoongArchSplitPairF64>;

def to_fclass_mask: SDNodeXForm<timm, [{
uint64_t Check = N->getZExtValue();
unsigned Mask = 0;
Expand Down
193 changes: 193 additions & 0 deletions llvm/test/CodeGen/LoongArch/calling-conv-ilp32d.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,193 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+d --target-abi=ilp32d < %s \
; RUN: | FileCheck %s

;; This file contains specific tests for the ilp32d ABI.

;; Check passing floating-point arguments with FPRs.

define i32 @callee_float_in_fpr(i32 %a, float %b, double %c) nounwind {
; CHECK-LABEL: callee_float_in_fpr:
; CHECK: # %bb.0:
; CHECK-NEXT: ftintrz.w.s $fa0, $fa0
; CHECK-NEXT: movfr2gr.s $a1, $fa0
; CHECK-NEXT: ftintrz.w.d $fa0, $fa1
; CHECK-NEXT: movfr2gr.s $a2, $fa0
; CHECK-NEXT: add.w $a0, $a0, $a1
; CHECK-NEXT: add.w $a0, $a0, $a2
; CHECK-NEXT: ret
;; Per the CHECK lines, %b arrives in $fa0 and %c in $fa1 (FPR argument
;; registers), while %a is in $a0.
%b_fptosi = fptosi float %b to i32
%c_fptosi = fptosi double %c to i32
%1 = add i32 %a, %b_fptosi
%2 = add i32 %1, %c_fptosi
ret i32 %2
}

define i32 @caller_float_in_fpr() nounwind {
; CHECK-LABEL: caller_float_in_fpr:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.w $sp, $sp, -16
; CHECK-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; CHECK-NEXT: movgr2fr.w $fa1, $zero
; CHECK-NEXT: movgr2frh.w $fa1, $zero
; CHECK-NEXT: movgr2fr.w $fa0, $zero
; CHECK-NEXT: ori $a0, $zero, 1
; CHECK-NEXT: bl callee_float_in_fpr
; CHECK-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; CHECK-NEXT: addi.w $sp, $sp, 16
; CHECK-NEXT: ret
;; Caller side of the same convention: float 0.0 is set up in $fa0,
;; double 0.0 in $fa1, and the i32 argument in $a0.
%1 = call i32 @callee_float_in_fpr(i32 1, float 0.0, double 0.0)
ret i32 %1
}

;; Check that the GPR is used once the FPRs are exhausted.

;; Must keep define on a single line due to an update_llc_test_checks.py limitation.
define i32 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) nounwind {
; CHECK-LABEL: callee_double_in_gpr_exhausted_fprs:
; CHECK: # %bb.0:
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: movgr2frh.w $fa0, $a1
; CHECK-NEXT: ftintrz.w.d $fa1, $fa7
; CHECK-NEXT: movfr2gr.s $a0, $fa1
; CHECK-NEXT: ftintrz.w.d $fa0, $fa0
; CHECK-NEXT: movfr2gr.s $a1, $fa0
; CHECK-NEXT: add.w $a0, $a0, $a1
; CHECK-NEXT: ret
;; Per the CHECK lines, the ninth double %i arrives in the GPR pair $a0/$a1
;; and is rebuilt into an FPR (movgr2fr.w + movgr2frh.w); %h is still in $fa7.
%h_fptosi = fptosi double %h to i32
%i_fptosi = fptosi double %i to i32
%1 = add i32 %h_fptosi, %i_fptosi
ret i32 %1
}

define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
; CHECK-LABEL: caller_double_in_gpr_exhausted_fprs:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.w $sp, $sp, -16
; CHECK-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
; CHECK-NEXT: fld.d $fa1, $a0, %pc_lo12(.LCPI3_0)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
; CHECK-NEXT: fld.d $fa2, $a0, %pc_lo12(.LCPI3_1)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
; CHECK-NEXT: fld.d $fa3, $a0, %pc_lo12(.LCPI3_2)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
; CHECK-NEXT: fld.d $fa4, $a0, %pc_lo12(.LCPI3_3)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
; CHECK-NEXT: fld.d $fa5, $a0, %pc_lo12(.LCPI3_4)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
; CHECK-NEXT: fld.d $fa6, $a0, %pc_lo12(.LCPI3_5)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
; CHECK-NEXT: fld.d $fa7, $a0, %pc_lo12(.LCPI3_6)
; CHECK-NEXT: addi.w $a0, $zero, 1
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: ffint.s.w $fa0, $fa0
; CHECK-NEXT: fcvt.d.s $fa0, $fa0
; CHECK-NEXT: lu12i.w $a1, 262688
; CHECK-NEXT: move $a0, $zero
; CHECK-NEXT: bl callee_double_in_gpr_exhausted_fprs
; CHECK-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; CHECK-NEXT: addi.w $sp, $sp, 16
; CHECK-NEXT: ret
;; The ninth double (9.0) no longer fits in $fa0-$fa7: per the CHECK lines
;; its bit pattern (high word 0x40220000 = lu12i.w 262688, low word zero) is
;; materialized in the GPR pair $a0/$a1 instead.
%1 = call i32 @callee_double_in_gpr_exhausted_fprs(
double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
double 7.0, double 8.0, double 9.0)
ret i32 %1
}

;; Check that the stack is used once the FPRs and GPRs are both exhausted.

;; Must keep define on a single line due to an update_llc_test_checks.py limitation.
define i32 @callee_double_on_stack_exhausted_fprs_gprs(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, double %j, double %k, double %l, double %m, double %n) nounwind {
; CHECK-LABEL: callee_double_on_stack_exhausted_fprs_gprs:
; CHECK: # %bb.0:
; CHECK-NEXT: fld.d $fa0, $sp, 0
; CHECK-NEXT: fld.d $fa1, $sp, 8
; CHECK-NEXT: ftintrz.w.d $fa0, $fa0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: ftintrz.w.d $fa0, $fa1
; CHECK-NEXT: movfr2gr.s $a1, $fa0
; CHECK-NEXT: add.w $a0, $a0, $a1
; CHECK-NEXT: ret
;; Per the CHECK lines, %m and %n are loaded from the incoming stack area
;; ($sp+0 and $sp+8) after both FPR and GPR argument registers ran out.
%m_fptosi = fptosi double %m to i32
%n_fptosi = fptosi double %n to i32
%1 = add i32 %m_fptosi, %n_fptosi
ret i32 %1
}

define i32 @caller_double_on_stack_exhausted_fprs_gprs() nounwind {
; CHECK-LABEL: caller_double_on_stack_exhausted_fprs_gprs:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.w $sp, $sp, -32
; CHECK-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; CHECK-NEXT: lu12i.w $a0, 262816
; CHECK-NEXT: st.w $a0, $sp, 4
; CHECK-NEXT: st.w $zero, $sp, 0
; CHECK-NEXT: lu12i.w $a0, 262848
; CHECK-NEXT: st.w $a0, $sp, 12
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
; CHECK-NEXT: fld.d $fa1, $a0, %pc_lo12(.LCPI5_0)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
; CHECK-NEXT: fld.d $fa2, $a0, %pc_lo12(.LCPI5_1)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
; CHECK-NEXT: fld.d $fa3, $a0, %pc_lo12(.LCPI5_2)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
; CHECK-NEXT: fld.d $fa4, $a0, %pc_lo12(.LCPI5_3)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
; CHECK-NEXT: fld.d $fa5, $a0, %pc_lo12(.LCPI5_4)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
; CHECK-NEXT: fld.d $fa6, $a0, %pc_lo12(.LCPI5_5)
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
; CHECK-NEXT: fld.d $fa7, $a0, %pc_lo12(.LCPI5_6)
; CHECK-NEXT: addi.w $a0, $zero, 1
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: ffint.s.w $fa0, $fa0
; CHECK-NEXT: fcvt.d.s $fa0, $fa0
; CHECK-NEXT: lu12i.w $a1, 262688
; CHECK-NEXT: lu12i.w $a3, 262720
; CHECK-NEXT: lu12i.w $a5, 262752
; CHECK-NEXT: lu12i.w $a7, 262784
; CHECK-NEXT: st.w $zero, $sp, 8
; CHECK-NEXT: move $a0, $zero
; CHECK-NEXT: move $a2, $zero
; CHECK-NEXT: move $a4, $zero
; CHECK-NEXT: move $a6, $zero
; CHECK-NEXT: bl callee_double_on_stack_exhausted_fprs_gprs
; CHECK-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; CHECK-NEXT: addi.w $sp, $sp, 32
; CHECK-NEXT: ret
;; Per the CHECK lines: the first eight doubles fill $fa0-$fa7, the next four
;; (9.0-12.0) are passed in the GPR pairs $a0/$a1..$a6/$a7, and the last two
;; (13.0, 14.0) are stored to the outgoing stack area at $sp+0 and $sp+8.
%1 = call i32 @callee_double_on_stack_exhausted_fprs_gprs(
double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
double 7.0, double 8.0, double 9.0, double 10.0, double 11.0, double 12.0,
double 13.0, double 14.0)
ret i32 %1
}

;; Check returning doubles.

define double @callee_double_ret() nounwind {
; CHECK-LABEL: callee_double_ret:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.w $a0, $zero, 1
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: ffint.s.w $fa0, $fa0
; CHECK-NEXT: fcvt.d.s $fa0, $fa0
; CHECK-NEXT: ret
;; Per the CHECK lines, the constant 1.0 is materialized in $fa0, where the
;; double return value is left.
ret double 1.0
}

define i64 @caller_double_ret() nounwind {
; CHECK-LABEL: caller_double_ret:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.w $sp, $sp, -16
; CHECK-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; CHECK-NEXT: bl callee_double_ret
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: movfrh2gr.s $a1, $fa0
; CHECK-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; CHECK-NEXT: addi.w $sp, $sp, 16
; CHECK-NEXT: ret
;; Per the CHECK lines, the bitcast of the $fa0 return value lowers to
;; register moves (movfr2gr.s/movfrh2gr.s) into $a0/$a1 with no stack
;; round-trip.
%1 = call double @callee_double_ret()
%2 = bitcast double %1 to i64
ret i64 %2
}
14 changes: 5 additions & 9 deletions llvm/test/CodeGen/LoongArch/inline-asm-constraint-f.ll
Original file line number Diff line number Diff line change
Expand Up @@ -32,18 +32,14 @@ define double @constraint_f_double(double %a) nounwind {
define double @constraint_gpr(double %a) {
; LA32-LABEL: constraint_gpr:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: fst.d $fa0, $sp, 8
; LA32-NEXT: ld.w $a7, $sp, 8
; LA32-NEXT: ld.w $t0, $sp, 12
; LA32-NEXT: .cfi_def_cfa_offset 0
; LA32-NEXT: movfr2gr.s $a7, $fa0
; LA32-NEXT: movfrh2gr.s $t0, $fa0
; LA32-NEXT: #APP
; LA32-NEXT: move $a6, $a7
; LA32-NEXT: #NO_APP
; LA32-NEXT: st.w $a7, $sp, 4
; LA32-NEXT: st.w $a6, $sp, 0
; LA32-NEXT: fld.d $fa0, $sp, 0
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: movgr2fr.w $fa0, $a6
; LA32-NEXT: movgr2frh.w $fa0, $a7
; LA32-NEXT: ret
;
; LA64-LABEL: constraint_gpr:
Expand Down
14 changes: 4 additions & 10 deletions llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
Original file line number Diff line number Diff line change
Expand Up @@ -279,11 +279,8 @@ define double @convert_u64_to_double(i64 %a) nounwind {
define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
; LA32-LABEL: bitcast_i64_to_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $a1, $sp, 12
; LA32-NEXT: st.w $a0, $sp, 8
; LA32-NEXT: fld.d $fa0, $sp, 8
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: movgr2frh.w $fa0, $a1
; LA32-NEXT: ret
;
; LA64-LABEL: bitcast_i64_to_double:
Expand All @@ -297,11 +294,8 @@ define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
define i64 @bitcast_double_to_i64(double %a) nounwind {
; LA32-LABEL: bitcast_double_to_i64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: fst.d $fa0, $sp, 8
; LA32-NEXT: ld.w $a0, $sp, 8
; LA32-NEXT: ld.w $a1, $sp, 12
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: movfrh2gr.s $a1, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: bitcast_double_to_i64:
Expand Down
40 changes: 16 additions & 24 deletions llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
Original file line number Diff line number Diff line change
Expand Up @@ -115,9 +115,8 @@ define double @load_acquire_double(ptr %ptr) {
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: ori $a1, $zero, 2
; LA32-NEXT: bl __atomic_load_8
; LA32-NEXT: st.w $a1, $sp, 4
; LA32-NEXT: st.w $a0, $sp, 0
; LA32-NEXT: fld.d $fa0, $sp, 0
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: movgr2frh.w $fa0, $a1
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
Expand Down Expand Up @@ -234,9 +233,8 @@ define double @load_unordered_double(ptr %ptr) {
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: move $a1, $zero
; LA32-NEXT: bl __atomic_load_8
; LA32-NEXT: st.w $a1, $sp, 4
; LA32-NEXT: st.w $a0, $sp, 0
; LA32-NEXT: fld.d $fa0, $sp, 0
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: movgr2frh.w $fa0, $a1
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
Expand Down Expand Up @@ -352,9 +350,8 @@ define double @load_monotonic_double(ptr %ptr) {
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: move $a1, $zero
; LA32-NEXT: bl __atomic_load_8
; LA32-NEXT: st.w $a1, $sp, 4
; LA32-NEXT: st.w $a0, $sp, 0
; LA32-NEXT: fld.d $fa0, $sp, 0
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: movgr2frh.w $fa0, $a1
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
Expand Down Expand Up @@ -481,9 +478,8 @@ define double @load_seq_cst_double(ptr %ptr) {
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: ori $a1, $zero, 5
; LA32-NEXT: bl __atomic_load_8
; LA32-NEXT: st.w $a1, $sp, 4
; LA32-NEXT: st.w $a0, $sp, 0
; LA32-NEXT: fld.d $fa0, $sp, 0
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: movgr2frh.w $fa0, $a1
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
Expand Down Expand Up @@ -605,9 +601,8 @@ define void @store_release_double(ptr %ptr, double %v) {
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: fst.d $fa0, $sp, 0
; LA32-NEXT: ld.w $a1, $sp, 0
; LA32-NEXT: ld.w $a2, $sp, 4
; LA32-NEXT: movfr2gr.s $a1, $fa0
; LA32-NEXT: movfrh2gr.s $a2, $fa0
; LA32-NEXT: ori $a3, $zero, 3
; LA32-NEXT: bl __atomic_store_8
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
Expand Down Expand Up @@ -723,9 +718,8 @@ define void @store_unordered_double(ptr %ptr, double %v) {
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: fst.d $fa0, $sp, 0
; LA32-NEXT: ld.w $a1, $sp, 0
; LA32-NEXT: ld.w $a2, $sp, 4
; LA32-NEXT: movfr2gr.s $a1, $fa0
; LA32-NEXT: movfrh2gr.s $a2, $fa0
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl __atomic_store_8
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
Expand Down Expand Up @@ -841,9 +835,8 @@ define void @store_monotonic_double(ptr %ptr, double %v) {
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: fst.d $fa0, $sp, 0
; LA32-NEXT: ld.w $a1, $sp, 0
; LA32-NEXT: ld.w $a2, $sp, 4
; LA32-NEXT: movfr2gr.s $a1, $fa0
; LA32-NEXT: movfrh2gr.s $a2, $fa0
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl __atomic_store_8
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
Expand Down Expand Up @@ -973,9 +966,8 @@ define void @store_seq_cst_double(ptr %ptr, double %v) {
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: fst.d $fa0, $sp, 0
; LA32-NEXT: ld.w $a1, $sp, 0
; LA32-NEXT: ld.w $a2, $sp, 4
; LA32-NEXT: movfr2gr.s $a1, $fa0
; LA32-NEXT: movfrh2gr.s $a2, $fa0
; LA32-NEXT: ori $a3, $zero, 5
; LA32-NEXT: bl __atomic_store_8
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
Expand Down
Loading