; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
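
; Adding a constant to a zero-extended i16: the result (at most 65535 + 42)
; always fits in 32 bits. X86 already uses a 32-bit addl and zeroes %edx for
; the high half; X64 currently emits a 64-bit addq.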
define i64 @test_add_i64_i16_const(i16 %a) nounwind {
; X86-LABEL: test_add_i64_i16_const:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $42, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test_add_i64_i16_const:
; X64: # %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: addq $42, %rax
; X64-NEXT: retq
  %zext_a = zext i16 %a to i64
  %sum = add nuw nsw i64 %zext_a, 42
  ret i64 %sum
}

; TODO: The upper 48 bits of both operands are known zero, so we can safely
; truncate this to a 32-bit addition.
define i64 @test_add_i64_i16_zext(i16 %a, i16 %b) nounwind {
; X86-LABEL: test_add_i64_i16_zext:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test_add_i64_i16_zext:
; X64: # %bb.0:
; X64-NEXT: movzwl %di, %ecx
; X64-NEXT: movzwl %si, %eax
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: retq
  %zext_a = zext i16 %a to i64
  %zext_b = zext i16 %b to i64
  %sum = add nuw nsw i64 %zext_a, %zext_b
  ret i64 %sum
}
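
; A hypothetical narrowed X64 sequence for the test above (a sketch under the
; TODO's assumption, not verified by FileCheck): a 32-bit addl implicitly
; zeroes the upper 32 bits of %rax, so it would produce the same i64 result:
;   movzwl %di, %ecx
;   movzwl %si, %eax
;   addl %ecx, %eax
;   retq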

; Negative: bit 32 of %a is set (or'd with 2^32) to force a 64-bit addition;
; we do not truncate to a 32-bit addition in this case.
define i64 @negative_test_add_i64_i16(i16 %a) nounwind {
; X86-LABEL: negative_test_add_i64_i16:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $42, %eax
; X86-NEXT: movl $1, %edx
; X86-NEXT: retl
;
; X64-LABEL: negative_test_add_i64_i16:
; X64: # %bb.0:
; X64-NEXT: movzwl %di, %ecx
; X64-NEXT: movabsq $4294967338, %rax # imm = 0x10000002A
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: retq
  %zext_a = zext i16 %a to i64
  %or_a = or i64 %zext_a, 4294967296
  %sum = add nuw nsw i64 %or_a, 42
  ret i64 %sum
}
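
; Note how the constants fold above: 2^32 + 42 = 4294967338 (0x10000002A),
; the movabsq immediate; since bit 32 is known set, the sum cannot fit in
; 32 bits.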

; Negative: we do not truncate to a 32-bit addition in the case of sign
; extension, since the upper bits are copies of the sign bit rather than
; known zeros.
define i64 @negative_test_add_i64_i16_sext(i16 %a, i16 %b) nounwind {
; X86-LABEL: negative_test_add_i64_i16_sext:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: adcl %esi, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: negative_test_add_i64_i16_sext:
; X64: # %bb.0:
; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movswq %di, %rcx
; X64-NEXT: movswq %si, %rax
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: retq
  %sext_a = sext i16 %a to i64
  %sext_b = sext i16 %b to i64
  %sum = add nuw nsw i64 %sext_a, %sext_b
  ret i64 %sum
}
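
; For example, %a = -1 sign-extends to 0xFFFFFFFFFFFFFFFF: the upper 48 bits
; are all ones, not zeros, so the known-zero reasoning above does not apply.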