|
1732 | 1732 | } |
1733 | 1733 | ) |
1734 | 1734 |
|
;; Unpredicated bitwise inverted select on Advanced SIMD modes, using the
;; SVE2 NBSL instruction on the overlapping Z registers (%Z prints the
;; SVE view of the vector register).
;; (~(op3 ? bsl_mov : bsl_dup)) == (~(((bsl_mov ^ bsl_dup) & op3) ^ bsl_dup))
;; <bsl_1st>/<bsl_2nd> presumably tie operand 0 to whichever of operands
;; 1/2 is the "mov" operand, with BSL_DUP naming the other one — defined
;; alongside the SVE bsl patterns elsewhere in this file.
(define_insn "*aarch64_sve2_nbsl_unpred<mode>"
  [(set (match_operand:VDQ_I 0 "register_operand")
	(not:VDQ_I
	  (xor:VDQ_I
	    (and:VDQ_I
	      (xor:VDQ_I
		(match_operand:VDQ_I 1 "register_operand")
		(match_operand:VDQ_I 2 "register_operand"))
	      (match_operand:VDQ_I 3 "register_operand"))
	    (match_dup BSL_DUP))))]
  "TARGET_SVE2"
  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] nbsl\t%Z0.d, %Z0.d, %Z<bsl_dup>.d, %Z3.d
     ;; Fallback when operand 0 cannot be tied: copy the mov operand first.
     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%Z0, %Z<bsl_mov>\;nbsl\t%Z0.d, %Z0.d, %Z<bsl_dup>.d, %Z3.d
  }
)
| 1751 | + |
1735 | 1752 | ;; Unpredicated bitwise select with inverted first operand. |
1736 | 1753 | ;; (op3 ? ~bsl_mov : bsl_dup) == ((~(bsl_mov ^ bsl_dup) & op3) ^ bsl_dup) |
1737 | 1754 | (define_expand "@aarch64_sve2_bsl1n<mode>" |
|
1777 | 1794 | } |
1778 | 1795 | ) |
1779 | 1796 |
|
;; Unpredicated bitwise select with inverted first operand on Advanced
;; SIMD modes, using the SVE2 BSL1N instruction on the overlapping
;; Z registers.
;; (op3 ? ~bsl_mov : bsl_dup) == ((~(bsl_mov ^ bsl_dup) & op3) ^ bsl_dup)
(define_insn "*aarch64_sve2_bsl1n_unpred<mode>"
  [(set (match_operand:VDQ_I 0 "register_operand")
	(xor:VDQ_I
	  (and:VDQ_I
	    (not:VDQ_I
	      (xor:VDQ_I
		(match_operand:VDQ_I 1 "register_operand")
		(match_operand:VDQ_I 2 "register_operand")))
	    (match_operand:VDQ_I 3 "register_operand"))
	  (match_dup BSL_DUP)))]
  "TARGET_SVE2"
  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl1n\t%Z0.d, %Z0.d, %Z<bsl_dup>.d, %Z3.d
     ;; Fallback when operand 0 cannot be tied: copy the mov operand first.
     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%Z0, %Z<bsl_mov>\;bsl1n\t%Z0.d, %Z0.d, %Z<bsl_dup>.d, %Z3.d
  }
)
| 1813 | + |
1780 | 1814 | ;; Unpredicated bitwise select with inverted second operand. |
1781 | 1815 | ;; (bsl_dup ? bsl_mov : ~op3) == ((bsl_dup & bsl_mov) | (~op3 & ~bsl_dup)) |
1782 | 1816 | (define_expand "@aarch64_sve2_bsl2n<mode>" |
|
1851 | 1885 | } |
1852 | 1886 | ) |
1853 | 1887 |
|
;; Unpredicated bitwise select with inverted second operand on Advanced
;; SIMD modes, using the SVE2 BSL2N instruction on the overlapping
;; Z registers.
;; (bsl_dup ? bsl_mov : ~op3) == ((bsl_dup & bsl_mov) | (~op3 & ~bsl_dup))
(define_insn "*aarch64_sve2_bsl2n_unpred<mode>"
  [(set (match_operand:VDQ_I 0 "register_operand")
	(ior:VDQ_I
	  (and:VDQ_I
	    (match_operand:VDQ_I 1 "register_operand")
	    (match_operand:VDQ_I 2 "register_operand"))
	  (and:VDQ_I
	    (not:VDQ_I (match_operand:VDQ_I 3 "register_operand"))
	    (not:VDQ_I (match_dup BSL_DUP)))))]
  "TARGET_SVE2"
  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl2n\t%Z0.d, %Z0.d, %Z3.d, %Z<bsl_dup>.d
     ;; Fallback when operand 0 cannot be tied: copy the mov operand first.
     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%Z0, %Z<bsl_mov>\;bsl2n\t%Z0.d, %Z0.d, %Z3.d, %Z<bsl_dup>.d
  }
)
| 1903 | + |
;; Second form of the unpredicated BSL2N pattern above, with the two
;; inverted operands in the opposite order inside the IOR.  RTL
;; canonicalization can produce either ordering of the (not ...) pair,
;; so both orderings need a pattern.  Duplicating the "*"-prefixed name
;; is fine: such names are for dump readability only and generate no
;; gen_* function.
;; (bsl_dup ? bsl_mov : ~op3) == ((bsl_dup & bsl_mov) | (~bsl_dup & ~op3))
(define_insn "*aarch64_sve2_bsl2n_unpred<mode>"
  [(set (match_operand:VDQ_I 0 "register_operand")
	(ior:VDQ_I
	  (and:VDQ_I
	    (match_operand:VDQ_I 1 "register_operand")
	    (match_operand:VDQ_I 2 "register_operand"))
	  (and:VDQ_I
	    (not:VDQ_I (match_dup BSL_DUP))
	    (not:VDQ_I (match_operand:VDQ_I 3 "register_operand")))))]
  "TARGET_SVE2"
  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl2n\t%Z0.d, %Z0.d, %Z3.d, %Z<bsl_dup>.d
     ;; Fallback when operand 0 cannot be tied: copy the mov operand first.
     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%Z0, %Z<bsl_mov>\;bsl2n\t%Z0.d, %Z0.d, %Z3.d, %Z<bsl_dup>.d
  }
)
| 1919 | + |
1854 | 1920 | ;; ------------------------------------------------------------------------- |
1855 | 1921 | ;; ---- [INT] Shift-and-accumulate operations |
1856 | 1922 | ;; ------------------------------------------------------------------------- |
|
0 commit comments