diff --git a/Cargo.lock b/Cargo.lock index 0f28545759..f7d40e874e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,11 +4,11 @@ version = 3 [[package]] name = "addr2line" -version = "0.19.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ - "gimli", + "gimli 0.26.1", ] [[package]] @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "ahash" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" + [[package]] name = "ahash" version = "0.7.6" @@ -28,6 +34,15 @@ dependencies = [ "version_check", ] +[[package]] +name = "aho-corasick" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +dependencies = [ + "memchr", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -39,15 +54,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" [[package]] name = "arbitrary" -version = "1.2.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0224938f92e7aef515fac2ff2d18bd1115c1394ddf4a092e0c87e8be9499ee5" +checksum = "510c76ecefdceada737ea728f4f9a84bd2e1ef29f1ba555e560940fe279954de" dependencies = [ "derive_arbitrary", ] @@ -58,26 +73,26 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi 0.1.19", + "hermit-abi", "libc", "winapi", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -86,103 +101,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "bolero" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256a795047239482fdc98374c3a050d51ca8921427c842e089b3bd7267659427" -dependencies = 
[ - "bolero-afl", - "bolero-engine", - "bolero-generator", - "bolero-honggfuzz", - "bolero-libfuzzer", - "cfg-if 1.0.0", - "libtest-mimic", - "rand", -] - -[[package]] -name = "bolero-afl" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd0c2595f3cc558c84e285318f8d29d3552140ecd106dbf3a356094898dc5619" -dependencies = [ - "bolero-engine", - "cc", -] - -[[package]] -name = "bolero-engine" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "839667421d443c03ca5746ec1c1b8db10d305fdb062f6f20c62b3f4cfcd431b5" -dependencies = [ - "anyhow", - "backtrace", - "bolero-generator", - "lazy_static", - "pretty-hex", - "rand", -] - -[[package]] -name = "bolero-generator" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce5ff9a4b0a1f80c09e3a35c4dc47a3bed344e5a431f2b96ca74952beb6c0767" -dependencies = [ - "bolero-generator-derive", - "byteorder", - "either", - "rand_core", -] - -[[package]] -name = "bolero-generator-derive" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a2ef03f5627ff547424f470cdf527bc5c7551ec48bd560f3a0e794d0082c6f9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "bolero-honggfuzz" -version = "0.6.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dc187a50ea23588958b0160113a742181b09ba4dba8412072c5e311a062bb4b" -dependencies = [ - "bolero-engine", -] - -[[package]] -name = "bolero-libfuzzer" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7734f24b16e80871f6a54e636e0db8338c22eea957685b4751e29b1dce1a5b" -dependencies = [ - "bolero-engine", - "cc", -] +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bstr" @@ -207,15 +128,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "314889ea31cda264cb7c3d6e6e5c9415a987ecb0e72c17c00d36fbb881d34abe" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -223,9 +144,9 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "4a2b3b92c135dae665a6f760205b89187638e83bed17ef3e44e83c712cf30600" dependencies = [ "proc-macro2", "quote", @@ -240,24 +161,18 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "cast" -version = "0.3.0" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cc" -version = "1.0.78" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ 
- "jobserver", + "rustc_version", ] [[package]] -name = "cfg-if" -version = "0.1.10" +name = "cc" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cfg-if" @@ -271,13 +186,9 @@ version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term", - "atty", "bitflags", - "strsim", "textwrap", "unicode-width", - "vec_map", ] [[package]] @@ -291,11 +202,95 @@ dependencies = [ "trybuild", ] +[[package]] +name = "compiletest_rs" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0086d6ad78cf409c3061618cd98e2789d5c9ce598fc9651611cf62eae0a599cb" +dependencies = [ + "diff", + "filetime", + "getopts", + "lazy_static", + "libc", + "log", + "miow", + "regex", + "rustfix", + "serde", + "serde_derive", + "serde_json", + "tester", + "winapi", +] + +[[package]] +name = "cranelift-bforest" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e6bea67967505247f54fa2c85cf4f6e0e31c4e5692c9b70e4ae58e339067333" +dependencies = [ + "cranelift-entity", +] + +[[package]] +name = "cranelift-codegen" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48194035d2752bdd5bdae429e3ab88676e95f52a2b1355a5d4e809f9e39b1d74" +dependencies = [ + "cranelift-bforest", + "cranelift-codegen-meta", + "cranelift-codegen-shared", + "cranelift-entity", + "gimli 0.25.0", + "hashbrown 0.9.1", + "log", + "regalloc", + "smallvec", + "target-lexicon", +] + +[[package]] +name = "cranelift-codegen-meta" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976efb22fcab4f2cd6bd4e9913764616a54d895c1a23530128d04e03633c555f" +dependencies = [ + "cranelift-codegen-shared", + "cranelift-entity", +] + +[[package]] +name = "cranelift-codegen-shared" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" + +[[package]] +name = "cranelift-entity" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" + +[[package]] +name = "cranelift-frontend" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "279afcc0d3e651b773f94837c3d581177b348c8d69e928104b2e9fccb226f921" +dependencies = [ + "cranelift-codegen", + "hashbrown 0.9.1", + "log", + "smallvec", + "target-lexicon", +] + [[package]] name = "criterion" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" dependencies = [ "atty", "cast", @@ -319,9 +314,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" dependencies = [ "cast", "itertools", @@ -329,68 +324,48 @@ 
dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", - "crossbeam-utils 0.8.14", + "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", - "memoffset 0.7.1", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" dependencies = [ - "autocfg", - "cfg-if 0.1.10", + "cfg-if", "lazy_static", ] -[[package]] -name = "crossbeam-utils" -version = "0.8.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "csv" version = "1.1.6" @@ -399,7 +374,7 @@ checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -415,9 +390,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.26" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" dependencies = [ "quote", "syn", @@ -425,9 +400,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" dependencies = [ "darling_core", "darling_macro", @@ -435,22 +410,23 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" dependencies = [ "fnv", "ident_case", "proc-macro2", 
"quote", + "strsim", "syn", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" dependencies = [ "darling_core", "quote", @@ -459,9 +435,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf460bbff5f571bfc762da5102729f59f338be7db17a21fade44c5c4f5005350" +checksum = "b24629208e87a2d8b396ff43b15c4afb0a69cea3fbbaa9ed9b92b7c02f0aed73" dependencies = [ "proc-macro2", "quote", @@ -470,21 +446,36 @@ dependencies = [ [[package]] name = "diff" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" [[package]] -name = "dissimilar" -version = "1.0.5" +name = "dirs-next" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5f0c7e4bd266b8ab2550e6238d2e74977c23c15536ac7be45e9c95e2e3fbbb" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] [[package]] name = "dynasm" -version = "1.2.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add9a102807b524ec050363f09e06f1504214b0e1c7797f64261c891022dce8b" +checksum = "ab1096ebdaa974cd6a41a743e94dfa00cce9bfbf4690bcc73fdec6a903938ccc" dependencies = [ "bitflags", "byteorder", @@ -497,9 +488,9 @@ dependencies = [ [[package]] name = "dynasmrt" -version = "1.2.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64fba5a42bd76a17cad4bfa00de168ee1cbfa06a5e8ce992ae880218c05641a9" +checksum = "c20c69d1e16ae47889b47c301c790f48615cd9bfbdf586e3f6d4fde64af3d259" dependencies = [ "byteorder", "dynasm", @@ -508,24 +499,24 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "enumset" -version = "1.0.12" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" +checksum = "6216d2c19a6fb5f29d1ada1dc7bc4367a8cbf0fa4af5cf12e07b5bbdde6b5b2c" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.6.1" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" +checksum = "6451128aa6655d880755345d085494cf7561a6bee7c8dc821e5d77e6d267ecd4" dependencies = [ "darling", "proc-macro2", @@ -534,27 +525,21 @@ dependencies = [ ] [[package]] -name = "fastrand" -version = "1.8.0" +name = "fallible-iterator" +version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" -dependencies = [ - "instant", -] +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] -name = "finite-wasm" -version = "0.3.0" +name = "filetime" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b821e04f9b8562f75e2c3a1806d629084a78e7f154aadac9e8230ab9efd1d7" +checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" dependencies = [ - "bitvec", - "dissimilar", - "num-traits", - "prefix-sum-vec", - "thiserror", - "wasmparser", - "wasmprinter", + "cfg-if", + "libc", + "redox_syscall", + "winapi", ] [[package]] @@ -564,49 +549,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "form_urlencoded" -version = "1.1.0" +name = "getopts" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" dependencies = [ - "percent-encoding", + "unicode-width", ] [[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "generator" -version = "0.7.2" +name = "getrandom" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266041a359dfa931b370ef684cceb84b166beb14f7f0421f4a6a3d0c446d12e" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cc", + "cfg-if", "libc", - "log", - "rustversion", - "windows", + "wasi", ] [[package]] -name = "getrandom" -version = "0.2.8" +name = "gimli" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi", + "fallible-iterator", + "indexmap", + "stable_deref_trait", ] [[package]] name = "gimli" -version = "0.27.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] name = "glob" @@ -622,37 +599,31 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" dependencies = [ - "ahash", + "ahash 0.4.7", ] [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] -name = "heck" -version = "0.3.3" +name = "hashbrown" +version = "0.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" dependencies = [ - "unicode-segmentation", + "ahash 0.7.6", ] -[[package]] -name = "heck" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" - [[package]] name = "hermit-abi" version = "0.1.19" @@ -662,15 +633,6 @@ dependencies = [ "libc", ] -[[package]] -name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -678,23 +640,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] -name = "idna" -version = "0.3.0" +name = "indexmap" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "autocfg", + "hashbrown 0.11.2", ] [[package]] -name = "indexmap" -version = "1.9.2" +name = "inkwell" +version = "0.1.0-beta.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "2223d0eba0ae6d40a3e4680c6a3209143471e1f38b41746ea309aa36dde9f90b" dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "either", + "inkwell_internals", + "libc", + "llvm-sys", + "once_cell", + "parking_lot", + "regex", +] + +[[package]] +name = "inkwell_internals" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c7090af3d300424caa81976b8c97bca41cd70e861272c072e188ae082fb49f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -703,14 +681,14 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "itertools" -version = "0.10.5" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" dependencies = [ "either", ] @@ -721,26 +699,11 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" -[[package]] -name = "itoa" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" - -[[package]] -name = "jobserver" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" -dependencies = [ - "libc", -] - [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" 
+checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -759,15 +722,15 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "f98a04dce437184842841303488f70d0188c5f51437d2a834dc097eafa909a01" [[package]] name = "libfuzzer-sys" -version = "0.4.5" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fff891139ee62800da71b7fd5b508d570b9ad95e614a53c6f453ca08366038" +checksum = "36a9a84a6e8b55dfefb04235e55edb2b9a2a18488fcae777a6bdaa6f06f1deb3" dependencies = [ "arbitrary", "cc", @@ -775,47 +738,34 @@ dependencies = [ ] [[package]] -name = "libtest-mimic" -version = "0.3.0" +name = "llvm-sys" +version = "120.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a7b8ac1f53f7be8d895ce6f7f534e49581c85c499b47429634b2cb2995e2ae" +checksum = "b4a810627ac62b396f5fd2214ba9bbd8748d4d6efdc4d2c1c1303ea7a75763ce" dependencies = [ - "crossbeam-channel 0.4.4", - "rayon", - "structopt", - "termcolor", + "cc", + "lazy_static", + "libc", + "regex", + "semver 0.11.0", ] [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ - "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.17" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "loom" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff50ecb28bb86013e935fb6683ab1f6d3a20016f123c76fd4c27470076ac30f5" -dependencies = [ - "cfg-if 1.0.0", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber", + "cfg-if", ] [[package]] @@ -836,23 +786,17 @@ dependencies = [ "regex-automata", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" -version = "2.5.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memmap2" -version = "0.5.8" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" +checksum = "4647a11b578fead29cdbb34d4adef8dd3dc35b876c9c6d5240d83f205abfe96e" dependencies = [ "libc", ] @@ -867,21 +811,22 @@ dependencies = [ ] [[package]] -name = "memoffset" -version = "0.7.1" +name = "miniz_oxide" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = 
"a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ + "adler", "autocfg", ] [[package]] -name = "miniz_oxide" -version = "0.6.2" +name = "miow" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "adler", + "winapi", ] [[package]] @@ -890,49 +835,39 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi", "libc", ] [[package]] name = "object" -version = "0.30.1" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d864c91689fdc196779b98dba0aceac6118594c2df6ee5d943eb6a8df4d107a" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "oorandom" @@ -942,19 +877,13 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "output_vt100" -version = "0.1.3" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" +checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" dependencies = [ "winapi", ] -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "parking_lot" version = "0.11.2" @@ -968,11 +897,11 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", @@ -981,22 +910,25 @@ dependencies = [ ] [[package]] -name = "percent-encoding" -version = "2.2.0" +name = "pest" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" dependencies = [ "num-traits", "plotters-backend", @@ -1007,47 +939,35 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" dependencies = [ "plotters-backend", ] [[package]] name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "prefix-sum-vec" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa06bd51638b6e76ac9ba9b6afb4164fa647bd2916d722f2623fbb6d1ed8bdba" - -[[package]] -name = "pretty-hex" -version = "0.2.1" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5c99d529f0d30937f6f4b8a86d988047327bb88d04d2c4afc356de74722131" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" [[package]] name = "pretty_assertions" -version = "1.3.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" +checksum = "ec0cfe1b2403f172ba0f234e500906ee0a3e493fb81092dac23ebefe129301cc" dependencies = [ + "ansi_term", "ctor", "diff", "output_vt100", - "yansi", ] [[package]] @@ -1076,11 +996,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "fb37d2df5df740e582f28f8560cf425f52bb267d872fe58358eadb554909f07a" dependencies = [ - "unicode-ident", + "unicode-xid", ] [[package]] @@ -1105,28 +1025,23 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - 
[[package]] name = "rand" -version = "0.8.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha", "rand_core", + "rand_hc", ] [[package]] @@ -1141,50 +1056,85 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.4" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core", +] + [[package]] name = "rayon" -version = "1.6.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" dependencies = [ + "autocfg", + "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" dependencies = [ - "crossbeam-channel 0.5.6", + "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.14", + "crossbeam-utils", + "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +dependencies = [ + "getrandom", + "redox_syscall", +] + +[[package]] +name = "regalloc" +version = "0.0.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" +dependencies = [ + "log", + "rustc-hash", + "smallvec", +] + [[package]] name = "regex" -version = "1.7.0" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ + "aho-corasick", + "memchr", "regex-syntax", ] @@ -1199,9 +1149,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "region" @@ -1235,12 +1185,12 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.39" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +checksum = "2cdcf5caf69bcc87b1e3f5427b4f21a32fdd53c2847687bdf9861abb1cdaa0d8" dependencies = [ "bytecheck", - "hashbrown 0.12.3", + "hashbrown 0.12.0", "ptr_meta", "rend", "rkyv_derive", @@ -1249,9 +1199,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.39" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +checksum = "a6cf557da1f81b8c7e889c59c9c3abaf6978f7feb156b9579e4f8bf6d7a2bada" dependencies = [ "proc-macro2", "quote", @@ -1264,26 +1214,44 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.4", +] + +[[package]] +name = "rustfix" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2c50b74badcddeb8f7652fa8323ce440b95286f8e4b64ebfd871c609672704e" +dependencies = [ + "anyhow", + "log", + "serde", + "serde_json", ] [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568" [[package]] name = "same-file" @@ -1295,34 +1263,49 @@ dependencies = [ ] [[package]] -name = "scoped-tls" -version = "1.0.1" +name = "scopeguard" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] -name = "scopeguard" -version = "1.1.0" +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + +[[package]] +name = "semver" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] [[package]] -name = "seahash" -version = "4.1.0" +name = "semver" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" [[package]] -name = "semver" -version = "1.0.16" +name = "semver-parser" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +dependencies = [ + "serde_derive", +] [[package]] name = "serde_cbor" @@ -1336,9 +1319,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -1347,11 +1330,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" dependencies = [ - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -1389,104 +1372,69 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" -dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] -name = "strum" -version = "0.24.1" +name = "stable_deref_trait" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros", -] +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] -name = "strum_macros" -version = "0.24.3" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck 0.4.0", - "proc-macro2", - "quote", - "rustversion", - "syn", -] +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.107" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" dependencies = [ "proc-macro2", "quote", - "unicode-ident", + "unicode-xid", ] 
-[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "target-lexicon" -version = "0.12.5" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" +checksum = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", - "fastrand", + "cfg-if", "libc", + "rand", "redox_syscall", "remove_dir_all", "winapi", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "termcolor" -version = "1.1.3" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] @@ -1501,15 +1449,28 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.11" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" +checksum = "eb78caec569a40f42c078c798c0e35b922d9054ec28e166f0d6ac447563d91a4" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "tester" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0639d10d8f4615f223a57275cf40f9bdb7cfbb806bcb7f7cc56e3beb55a576eb" +dependencies = [ + "cfg-if", + "getopts", + "libc", + "num_cpus", + "term", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -1521,18 +1482,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -1558,37 +1519,22 @@ dependencies = [ "serde_json", ] -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - [[package]] name = "toml" -version = "0.5.10" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -1597,9 +1543,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2", "quote", @@ -1608,150 +1554,65 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.1.3" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", - "log", - "tracing-core", ] [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "245da694cc7fc4729f3f418b304cb57789f1bed2a78c575407ab8a23f53cb4d3" dependencies = [ + "lazy_static", "matchers", - "nu-ansi-term", - "once_cell", "regex", "sharded-slab", - "smallvec", "thread_local", "tracing", "tracing-core", - "tracing-log", -] - -[[package]] -name = "tracing-tracy" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a42311a35ed976d72f359de43e9fe028ec9d9f1051c4c52bd05a4f66ff3cbf" -dependencies = [ - "tracing-core", - "tracing-subscriber", - "tracy-client", -] - -[[package]] -name = "tracy-client" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ebfe7a24c18b5ba86d8920c124b41b942352f863fbe0c84d3d63428fa1860f" -dependencies = [ - "loom", - "once_cell", - "tracy-client-sys", -] - -[[package]] -name = "tracy-client-sys" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "178d021455e83078bb38c00b70046b95117ef0a0312cbef925f426d833d11c79" -dependencies = [ - "cc", ] [[package]] name = "trybuild" -version = "1.0.73" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed01de3de062db82c0920b5cabe804f88d599a3f217932292597c678c903754d" +checksum = "9d664de8ea7e531ad4c0f5a834f20b8cb2b8e6dfe88d05796ee7887518ed67b9" dependencies = [ "glob", - "once_cell", + "lazy_static", "serde", - "serde_derive", "serde_json", "termcolor", "toml", ] [[package]] -name = "unicode-bidi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" - -[[package]] -name = "unicode-ident" -version = "1.0.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.10.0" +name = "ucd-trie" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] -name = "url" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "vec_map" -version = "0.8.2" +name = "unicode-xid" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "walkdir" @@ -1766,29 +1627,29 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", + "lazy_static", "log", - "once_cell", "proc-macro2", "quote", "syn", @@ -1797,9 +1658,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = 
"d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1807,9 +1668,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -1820,9 +1681,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "wasm-encoder" @@ -1833,24 +1694,6 @@ dependencies = [ "leb128", ] -[[package]] -name = "wasm-encoder" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b47b995b096a689358ca9de6c727b94351b95b390dbbf6b7021c22797d36caa" -dependencies = [ - "leb128", -] - -[[package]] -name = "wasm-encoder" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05632e0a66a6ed8cca593c24223aabd6262f256c3693ad9822c315285f010614" -dependencies = [ - "leb128", -] - [[package]] name = "wasm-smith" version = "0.4.5" @@ -1860,7 +1703,7 @@ dependencies = [ "arbitrary", "indexmap", "leb128", - "wasm-encoder 0.4.1", + "wasm-encoder", ] [[package]] @@ -1870,27 +1713,69 @@ dependencies = [ "anyhow", "libfuzzer-sys", "wasm-smith", + "wasmer-compiler-cranelift", + "wasmer-compiler-llvm", "wasmer-compiler-singlepass-near", "wasmer-engine-universal-near", "wasmer-near", "wasmprinter", ] +[[package]] +name = "wasmer-compiler-cranelift" +version = "2.1.0" +dependencies = [ + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "gimli 0.25.0", + "hashbrown 0.11.2", + "lazy_static", + "more-asserts", + "rayon", + "smallvec", + "target-lexicon", + "tracing", + "wasmer-compiler-near", + "wasmer-types-near", + "wasmer-vm-near", +] + +[[package]] +name = "wasmer-compiler-llvm" +version = "2.1.0" +dependencies = [ + "byteorder", + "cc", + "inkwell", + "itertools", + "lazy_static", + "libc", + "object", + "rayon", + "regex", + "rustc_version", + "semver 1.0.4", + "smallvec", + "target-lexicon", + "wasmer-compiler-near", + "wasmer-types-near", + "wasmer-vm-near", +] + [[package]] name = "wasmer-compiler-near" version = "2.4.0" dependencies = [ "enumset", - "finite-wasm", "hashbrown 0.11.2", "rkyv", "smallvec", "target-lexicon", "thiserror", - "tracing", "wasmer-types-near", "wasmer-vm-near", - "wasmparser", + "wasmparser 0.78.2", ] [[package]] @@ -1900,29 +1785,36 @@ dependencies = [ "byteorder", "dynasm", "dynasmrt", - "enumset", - "finite-wasm", "hashbrown 0.11.2", "lazy_static", - "memoffset 0.6.5", + "memoffset", "more-asserts", "rayon", "smallvec", - "strum", "target-lexicon", - "tracing", "wasmer-compiler-near", "wasmer-types-near", "wasmer-vm-near", ] +[[package]] +name = "wasmer-derive-near" +version = "2.4.0" +dependencies = [ + "compiletest_rs", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", + "wasmer-near", +] + [[package]] name = "wasmer-engine-near" version = "2.4.0" dependencies = [ "backtrace", "enumset", - "finite-wasm", "lazy_static", "memmap2", "more-asserts", @@ -1938,39 +1830,39 @@ dependencies = [ name = "wasmer-engine-universal-near" 
version = "2.4.0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "enumset", - "finite-wasm", "leb128", - "prefix-sum-vec", "region", "rkyv", "thiserror", - "tracing", "wasmer-compiler-near", "wasmer-engine-near", "wasmer-types-near", "wasmer-vm-near", - "wasmparser", "winapi", ] +[[package]] +name = "wasmer-integration-tests-ios" +version = "2.1.0" + [[package]] name = "wasmer-near" version = "2.4.0" dependencies = [ "anyhow", - "cfg-if 1.0.0", - "finite-wasm", + "cfg-if", "indexmap", "more-asserts", - "prefix-sum-vec", "target-lexicon", "tempfile", "thiserror", - "tracing", + "wasmer-compiler-cranelift", + "wasmer-compiler-llvm", "wasmer-compiler-near", "wasmer-compiler-singlepass-near", + "wasmer-derive-near", "wasmer-engine-near", "wasmer-engine-universal-near", "wasmer-types-near", @@ -1983,9 +1875,7 @@ dependencies = [ name = "wasmer-types-near" version = "2.4.0" dependencies = [ - "bolero", "indexmap", - "num-traits", "rkyv", "thiserror", ] @@ -1996,18 +1886,15 @@ version = "2.4.0" dependencies = [ "backtrace", "cc", - "cfg-if 1.0.0", - "finite-wasm", + "cfg-if", "indexmap", "libc", - "memoffset 0.6.5", + "memoffset", "more-asserts", "region", "rkyv", "thiserror", - "tracing", "wasmer-types-near", - "wasmparser", "winapi", ] @@ -2019,7 +1906,7 @@ dependencies = [ "tempfile", "thiserror", "wasmer-near", - "wast 38.0.1", + "wast", ] [[package]] @@ -2028,12 +1915,11 @@ version = "2.4.0" dependencies = [ "anyhow", "build-deps", - "cfg-if 1.0.0", + "cfg-if", "compiler-test-derive", "criterion", "glob", "lazy_static", - "rayon", "rustc_version", "serial_test", "tempfile", @@ -2041,9 +1927,8 @@ dependencies = [ "test-log", "tracing", "tracing-subscriber", - "tracing-tracy", - "tracy-client", - "wasm-encoder 0.12.0", + "wasmer-compiler-cranelift", + "wasmer-compiler-llvm", "wasmer-compiler-near", "wasmer-compiler-singlepass-near", "wasmer-engine-near", @@ -2057,22 +1942,24 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.99.0" +version = "0.78.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef3b717afc67f848f412d4f02c127dd3e35a0eecd58c684580414df4fde01d3" -dependencies = [ - "indexmap", - "url", -] +checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" + +[[package]] +name = "wasmparser" +version = "0.81.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98930446519f63d00a836efdc22f67766ceae8dbcc1571379f2bcabc6b2b9abc" [[package]] name = "wasmprinter" -version = "0.2.49" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27c13dff901f9354fa9a6a877152d9c5642513645985635c9b83bcca99e40ea1" +checksum = "a00ad4a51ba74183137c776ab37dea50b9f71db7454d7b654c2ba69ac5d9b223" dependencies = [ "anyhow", - "wasmparser", + "wasmparser 0.81.0", ] [[package]] @@ -2084,32 +1971,20 @@ dependencies = [ "leb128", ] -[[package]] -name = "wast" -version = "50.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2cbb59d4ac799842791fe7e806fa5dbbf6b5554d538e51cc8e176db6ff0ae34" -dependencies = [ - "leb128", - "memchr", - "unicode-width", - "wasm-encoder 0.20.0", -] - [[package]] name = "wat" -version = "1.0.52" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584aaf7a1ecf4d383bbe1a25eeab0cbb8ff96acc6796707ff65cde48f4632f15" +checksum = "adcfaeb27e2578d2c6271a45609f4a055e6d7ba3a12eff35b1fd5ba147bdf046" dependencies = [ - "wast 50.0.0", + "wast", ] [[package]] name = "web-sys" -version = 
"0.3.60" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -2145,61 +2020,3 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c4bd0a50ac6020f65184721f758dba47bb9fbc2133df715ec74a237b26794a" -dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_msvc" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7711666096bd4096ffa835238905bb33fb87267910e154b18b44eaabb340f2" - -[[package]] -name = "windows_i686_gnu" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763fc57100a5f7042e3057e7e8d9bdd7860d330070251a73d003563a3bb49e1b" - -[[package]] -name = "windows_i686_msvc" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bc7cbfe58828921e10a9f446fcaaf649204dcfe6c1ddd712c5eebae6bda1106" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6868c165637d653ae1e8dc4d82c25d4f97dd6605eaa8d784b5c6e0ab2a252b65" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4d40883ae9cae962787ca76ba76390ffa29214667a111db9e0a1ad8377e809" - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - -[[package]] -name = "yansi" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" diff --git a/Cargo.toml b/Cargo.toml index f962671ae4..d42a54904b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,9 @@ autoexamples = false [dependencies] wasmer = { version = "=2.4.0", path = "lib/api", package = "wasmer-near" } wasmer-compiler = { version = "=2.4.0", path = "lib/compiler", package = "wasmer-compiler-near" } +wasmer-compiler-cranelift = { version = "2.0.0", path = "lib/compiler-cranelift", optional = true } wasmer-compiler-singlepass = { version = "=2.4.0", path = "lib/compiler-singlepass", optional = true, package = "wasmer-compiler-singlepass-near" } +wasmer-compiler-llvm = { version = "2.0.0", path = "lib/compiler-llvm", optional = true } wasmer-engine = { version = "=2.4.0", path = "lib/engine", package = "wasmer-engine-near" } wasmer-engine-universal = { version = "=2.4.0", path = "lib/engine-universal", optional = true, package = "wasmer-engine-universal-near" } wasmer-wast = { version = "2.0.0", path = "tests/lib/wast", optional = true } @@ -20,19 +22,22 @@ wasmer-types = { version = "=2.4.0", path = "lib/types", package = "wasmer-types wasmer-vm = { version = "=2.4.0", path = "lib/vm", package = "wasmer-vm-near" } cfg-if = "1.0" -tracing = "0.1" [workspace] members = [ "lib/api", 
"lib/compiler", + "lib/compiler-cranelift", "lib/compiler-singlepass", + "lib/compiler-llvm", + "lib/derive", "lib/engine", "lib/engine-universal", "lib/vm", "lib/types", "tests/lib/wast", "tests/lib/compiler-test-derive", + "tests/integration/ios", "fuzz", ] resolver = "2" @@ -50,16 +55,13 @@ criterion = "0.3" lazy_static = "1.4" serial_test = "0.5" compiler-test-derive = { path = "tests/lib/compiler-test-derive" } -rayon = "1.5" tempfile = "3.1" # For logging tests using the `RUST_LOG=debug` when testing test-log = { version = "0.2", default-features = false, features = ["trace"] } tracing = { version = "0.1", default-features = false, features = ["log"] } tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt"] } -tracing-tracy = "0.9" -tracy-client = "0.13" wat = "1.0" -wasm-encoder = "0.12" + [features] # Don't add the compiler features in default, please add them on the Makefile @@ -85,6 +87,29 @@ singlepass = [ "wasmer-compiler-singlepass", "compiler", ] +cranelift = [ + "wasmer-compiler-cranelift", + "compiler", +] +llvm = [ + "wasmer-compiler-llvm", + "compiler", +] + +# Testing features +test-singlepass = [ + "singlepass", +] +test-cranelift = [ + "cranelift", +] +test-llvm = [ + "llvm", +] +test-universal = [ + "universal", + "test-generator/test-universal", +] # Specifies that we're running in coverage testing mode. This disables tests # that raise signals because that interferes with tarpaulin. @@ -98,95 +123,120 @@ name = "static_and_dynamic_functions" harness = false [[bench]] -name = "limits" +name = "many_functions" harness = false -[[example]] -name = "tracy-exec" -path = "examples/tracy_exec.rs" -required-features = ["singlepass"] - [[example]] name = "early-exit" path = "examples/early_exit.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "engine-universal" path = "examples/engine_universal.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] + +[[example]] +name = "engine-headless" +path = "examples/engine_headless.rs" +required-features = ["cranelift"] + +[[example]] +name = "platform-headless-ios" +path = "examples/platform_ios_headless.rs" +required-features = ["cranelift"] + +[[example]] +name = "cross-compilation" +path = "examples/engine_cross_compilation.rs" +required-features = ["cranelift"] [[example]] name = "compiler-singlepass" path = "examples/compiler_singlepass.rs" required-features = ["singlepass"] +[[example]] +name = "compiler-cranelift" +path = "examples/compiler_cranelift.rs" +required-features = ["cranelift"] + +[[example]] +name = "compiler-llvm" +path = "examples/compiler_llvm.rs" +required-features = ["llvm"] + [[example]] name = "exported-function" path = "examples/exports_function.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "exported-global" path = "examples/exports_global.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "exported-memory" path = "examples/exports_memory.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "imported-function" path = "examples/imports_function.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "imported-global" path = "examples/imports_global.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] + +[[example]] +name = "tunables-limit-memory" +path = "examples/tunables_limit_memory.rs" +required-features = 
["cranelift"] [[example]] name = "table" path = "examples/table.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "memory" path = "examples/memory.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "instance" path = "examples/instance.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "errors" path = "examples/errors.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "imported-function-env" path = "examples/imports_function_env.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "hello-world" path = "examples/hello_world.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] + +[[example]] +name = "metering" +path = "examples/metering.rs" +required-features = ["cranelift"] [[example]] name = "imports-exports" path = "examples/imports_exports.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] [[example]] name = "features" path = "examples/features.rs" -required-features = ["singlepass"] - -[[example]] -name = "coremark" -path = "examples/coremark.rs" -required-features = ["singlepass"] +required-features = ["cranelift"] diff --git a/Makefile b/Makefile index 8c95c79447..a5f53ec2c7 100644 --- a/Makefile +++ b/Makefile @@ -12,13 +12,24 @@ SHELL=/usr/bin/env bash # |------------|-----------|----------|--------------|-------| # | Compiler ⨯ Engine ⨯ Platform ⨯ Architecture ⨯ libc | # |------------|-----------|----------|--------------|-------| -# | Singlepass | Universal | Linux | amd64 | glibc | -# | | | Darwin | aarch64 | musl | -# | | | Windows | | | +# | Cranelift | Universal | Linux | amd64 | glibc | +# | LLVM | Dylib | Darwin | aarch64 | musl | +# | Singlepass | Staticlib | Windows | | | # |------------|-----------|----------|--------------|-------| # # Here is what works and what doesn't: # +# * Cranelift with the Universal engine works everywhere, +# +# * Cranelift with the Dylib engine works on Linux+Darwin/`amd64`, but +# it doesn't work on */`aarch64` or Windows/*. +# +# * LLVM with the Universal engine works on Linux+Darwin/`amd64`, +# but it doesn't work on */`aarch64` or Windows/*. +# +# * LLVM with the Dylib engine works on +# Linux+Darwin/`amd64`+`aarch64`, but it doesn't work on Windows/*. +# # * Singlepass with the Universal engine works on Linux+Darwin/`amd64`, but # it doesn't work on */`aarch64` or Windows/*. # @@ -90,11 +101,67 @@ endif # Variables that can be overriden by the users to force to enable or # to disable a specific compiler. +ENABLE_CRANELIFT ?= +ENABLE_LLVM ?= ENABLE_SINGLEPASS ?= # Which compilers we build. These have dependencies that may not be on the system. compilers := -exclude_tests := + +## +# Cranelift +## + +# If the user didn't disable the Cranelift compiler… +ifneq ($(ENABLE_CRANELIFT), 0) + # … then it can always be enabled. + compilers += cranelift + ENABLE_CRANELIFT := 1 +endif + +## +# LLVM +## + +# If the user didn't disable the LLVM compiler… +ifneq ($(ENABLE_LLVM), 0) + # … then maybe the user forced to enable the LLVM compiler. 
+ ifeq ($(ENABLE_LLVM), 1)
+ LLVM_VERSION := $(shell llvm-config --version)
+ compilers += llvm
+ # … otherwise, we try to autodetect LLVM from `llvm-config`
+ else ifneq (, $(shell which llvm-config 2>/dev/null))
+ LLVM_VERSION := $(shell llvm-config --version)
+
+ # If findstring is not empty, then it has found the value
+ ifneq (, $(findstring 13,$(LLVM_VERSION)))
+ compilers += llvm
+ else ifneq (, $(findstring 12,$(LLVM_VERSION)))
+ compilers += llvm
+ endif
+ # … or try to autodetect LLVM from `llvm-config-<version>`.
+ else
+ ifneq (, $(shell which llvm-config-13 2>/dev/null))
+ LLVM_VERSION := $(shell llvm-config-13 --version)
+ compilers += llvm
+ else ifneq (, $(shell which llvm-config-12 2>/dev/null))
+ LLVM_VERSION := $(shell llvm-config-12 --version)
+ compilers += llvm
+ endif
+ endif
+endif
+
+exclude_tests := --exclude wasmer-cli
+# We run integration tests separately (it requires building the c-api)
+exclude_tests += --exclude wasmer-integration-tests-cli
+exclude_tests += --exclude wasmer-integration-tests-ios
+
+ifneq (, $(findstring llvm,$(compilers)))
+ ENABLE_LLVM := 1
+else
+ # We exclude LLVM from our package testing
+ exclude_tests += --exclude wasmer-compiler-llvm
+endif

##
# Singlepass
@@ -135,6 +202,43 @@ compilers := $(strip $(compilers))
 # pairs are stored in the `compilers_engines` variable.
 compilers_engines :=

+##
+# The Cranelift case.
+##
+
+ifeq ($(ENABLE_CRANELIFT), 1)
+ compilers_engines += cranelift-universal
+
+ ifneq (, $(filter 1, $(IS_DARWIN) $(IS_LINUX)))
+ ifeq ($(IS_AMD64), 1)
+ ifneq ($(LIBC), musl)
+ compilers_engines += cranelift-dylib
+ endif
+ else ifeq ($(IS_AARCH64), 1)
+ # The object crate doesn't yet support Darwin + Aarch64 relocations
+ ifneq ($(IS_DARWIN), 1)
+ compilers_engines += cranelift-dylib
+ endif
+ endif
+ endif
+endif
+
+##
+# The LLVM case.
+##
+
+ifeq ($(ENABLE_LLVM), 1)
+ ifneq (, $(filter 1, $(IS_DARWIN) $(IS_LINUX)))
+ ifeq ($(IS_AMD64), 1)
+ compilers_engines += llvm-universal
+ compilers_engines += llvm-dylib
+ else ifeq ($(IS_AARCH64), 1)
+ compilers_engines += llvm-universal
+ compilers_engines += llvm-dylib
+ endif
+ endif
+endif
+
 ##
 # The Singlepass case.
 ##
@@ -209,6 +313,9 @@ $(info Cargo features:)
 $(info   * Compilers: `$(bold)$(green)${compiler_features}$(reset)`.)
 $(info Rust version: $(bold)$(green)$(shell rustc --version)$(reset).)
 $(info NodeJS version: $(bold)$(green)$(shell node --version)$(reset).)
+ifeq ($(ENABLE_LLVM), 1)
+ $(info LLVM version: $(bold)$(green)${LLVM_VERSION}$(reset).)
+endif
 $(info )
 $(info )
 $(info --------------)
@@ -266,8 +373,56 @@ endif
 #
 #####

-test:
- cargo test --release --all $(compiler_features)
+test: test-compilers test-packages test-examples
+
+test-compilers:
+ cargo test --release --tests $(compiler_features)
+
+test-packages:
+ cargo test --all --release $(exclude_tests)
+ cargo test --manifest-path lib/compiler-cranelift/Cargo.toml --release --no-default-features --features=std
+ cargo test --manifest-path lib/compiler-singlepass/Cargo.toml --release --no-default-features --features=std
+
+#####
+#
+# Testing compilers.
+#
+#####
+
+test-compilers-compat: $(foreach compiler,$(compilers),test-$(compiler))
+
+test-singlepass-dylib:
+ cargo test --release --tests $(compiler_features) -- singlepass::dylib
+
+test-singlepass-universal:
+ cargo test --release --tests $(compiler_features) -- singlepass::universal
+
+test-cranelift-dylib:
+ cargo test --release --tests $(compiler_features) -- cranelift::dylib
+
+test-cranelift-universal:
+ cargo test --release --tests $(compiler_features) -- cranelift::universal
+
+test-llvm-dylib:
+ cargo test --release --tests $(compiler_features) -- llvm::dylib
+
+test-llvm-universal:
+ cargo test --release --tests $(compiler_features) -- llvm::universal
+
+test-singlepass: $(foreach singlepass_engine,$(filter singlepass-%,$(compilers_engines)),test-$(singlepass_engine))
+
+test-cranelift: $(foreach cranelift_engine,$(filter cranelift-%,$(compilers_engines)),test-$(cranelift_engine))
+
+test-llvm: $(foreach llvm_engine,$(filter llvm-%,$(compilers_engines)),test-$(llvm_engine))
+
+test-examples:
+ cargo test --release $(compiler_features) --examples
+
+test-integration:
+ cargo test -p wasmer-integration-tests-cli
+
+test-integration-ios:
+ cargo test -p wasmer-integration-tests-ios

#####
#
diff --git a/PACKAGING.md b/PACKAGING.md
index 7b392fff50..f6a98b218e 100644
--- a/PACKAGING.md
+++ b/PACKAGING.md
@@ -5,7 +5,7 @@
 * Wasmer provides several compilers and the `Makefile` autodetects
   when compilers can be compiled and/or installed. Set the environment
-  variables `ENABLE_SINGLEPASS=1` to force compiler
+  variables `ENABLE_{CRANELIFT,LLVM,SINGLEPASS}=1` to force compilers
   to be built or to fail trying, e.g.:

 ```sh
@@ -23,7 +23,7 @@
 * In case you must build/install directly with `cargo`, make sure
   to enable at least one compiler feature, like e.g. `--features
-  singlepass`,
+  cranelift`,

 * Beware that compiling with `cargo build --workspace --features …`
   will not enable features on the subcrates in the workspace and
@@ -37,7 +37,7 @@
 * The `wasmer-headless` CLI contains a subset of the `wasmer`'s
   functionalities and should only be packaged when splitting — it
   must be built explicitly with:
- 
+
 ```sh
 $ make build-wasmer-headless-minimal install-wasmer-headless-minimal
 ```
diff --git a/benches/limits.rs b/benches/limits.rs
deleted file mode 100644
index a8d5d3099a..0000000000
--- a/benches/limits.rs
+++ /dev/null
@@ -1,173 +0,0 @@
-use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
-use wasmer::*;
-use wasmer_engine_universal::UniversalExecutableRef;
-
-pub struct LargeContract {
-    pub functions: u32,
-    pub locals_per_function: u32,
-    pub panic_imports: u32, // How many times to import `env.panic`
-}
-
-impl Default for LargeContract {
-    fn default() -> Self {
-        Self {
-            functions: 1,
-            locals_per_function: 0,
-            panic_imports: 0,
-        }
-    }
-}
-
-impl LargeContract {
-    /// Construct a contract with many entities.
-    ///
-    /// Currently supports constructing contracts that contain a specified number of functions with the
-    /// specified number of locals each.
-    ///
-    /// Exports a function called `main` that does nothing.
-    pub fn make(&self) -> Vec<u8> {
-        use wasm_encoder::{
-            CodeSection, EntityType, Export, ExportSection, Function, FunctionSection,
-            ImportSection, Instruction, Module, TypeSection, ValType,
-        };
-
-        // Won't generate a valid WASM without functions.
- assert!( - self.functions >= 1, - "must specify at least 1 function to be generated" - ); - let mut module = Module::new(); - let mut type_section = TypeSection::new(); - type_section.function([], []); - module.section(&type_section); - - if self.panic_imports != 0 { - let mut import_section = ImportSection::new(); - for _ in 0..self.panic_imports { - import_section.import("env", "panic", EntityType::Function(0)); - } - module.section(&import_section); - } - - let mut functions_section = FunctionSection::new(); - for _ in 0..self.functions { - functions_section.function(0); - } - module.section(&functions_section); - - let mut exports_section = ExportSection::new(); - exports_section.export("main", Export::Function(0)); - module.section(&exports_section); - - let mut code_section = CodeSection::new(); - for _ in 0..self.functions { - let mut f = Function::new([(self.locals_per_function, ValType::I64)]); - f.instruction(&Instruction::End); - code_section.function(&f); - } - module.section(&code_section); - - module.finish() - } -} - -fn many_functions(c: &mut Criterion) { - let mut group = c.benchmark_group("many_functions"); - for functions in [1, 10, 100, 1000, 10000] { - let wasm = LargeContract { - functions, - ..Default::default() - } - .make(); - let store = Store::new(&Universal::new(Singlepass::new()).engine()); - group.bench_function(BenchmarkId::new("compile+instantiate", functions), |b| { - b.iter(|| { - let module = Module::new(&store, &wasm).unwrap(); - let imports = imports! {}; - let _ = Instance::new(&module, &imports).unwrap(); - }) - }); - - let module = Module::new(&store, &wasm).unwrap(); - let imports = imports! {}; - let instance = Instance::new(&module, &imports).unwrap(); - group.bench_function(BenchmarkId::new("lookup_main", functions), |b| { - b.iter(|| { - let _: Function = instance.lookup_function("main").unwrap(); - }) - }); - - let main: Function = instance.lookup_function("main").unwrap(); - group.bench_function(BenchmarkId::new("call_main", functions), |b| { - b.iter(|| { - black_box(main.call(&[]).unwrap()); - }) - }); - - let wasm = wat::parse_bytes(wasm.as_ref()).unwrap(); - let executable = store.engine().compile(&wasm, store.tunables()).unwrap(); - group.bench_function(BenchmarkId::new("serialize", functions), |b| { - b.iter(|| { - black_box(executable.serialize().unwrap()); - }) - }); - - let serialized = executable.serialize().unwrap(); - group.bench_function(BenchmarkId::new("load", functions), |b| { - b.iter(|| unsafe { - let deserialized = UniversalExecutableRef::deserialize(&serialized).unwrap(); - black_box(store.engine().load(&deserialized).unwrap()); - }) - }); - } -} - -fn many_locals(c: &mut Criterion) { - let mut group = c.benchmark_group("many_locals"); - for (functions, locals_per_function) in [(10, 100), (100, 1000), (1000, 10000)] { - let wasm = LargeContract { - functions, - locals_per_function, - ..Default::default() - } - .make(); - let size = functions * locals_per_function; - let store = Store::new(&Universal::new(Singlepass::new()).engine()); - group.bench_function(BenchmarkId::new("compile+instantiate", size), |b| { - b.iter(|| { - let module = Module::new(&store, &wasm).unwrap(); - let imports = imports! 
{};
-            let _ = Instance::new(&module, &imports).unwrap();
-        })
-        });
-
-        let wasm = wat::parse_bytes(wasm.as_ref()).unwrap();
-        let executable = store.engine().compile(&wasm, store.tunables()).unwrap();
-        group.bench_function(BenchmarkId::new("serialize", size), |b| {
-            b.iter(|| {
-                black_box(executable.serialize().unwrap());
-            })
-        });
-
-        let serialized = executable.serialize().unwrap();
-        group.bench_function(BenchmarkId::new("load", size), |b| {
-            b.iter(|| unsafe {
-                let deserialized = UniversalExecutableRef::deserialize(&serialized).unwrap();
-                black_box(store.engine().load(&deserialized).unwrap());
-            })
-        });
-    }
-}
-
-criterion_group! {
-    name = functions;
-    config = Criterion::default();
-    targets = many_functions
-}
-criterion_group! {
-    name = locals;
-    config = Criterion::default();
-    targets = many_locals
-}
-
-criterion_main!(functions, locals);
diff --git a/benches/many_functions.rs b/benches/many_functions.rs
new file mode 100644
index 0000000000..4c16ac7ffe
--- /dev/null
+++ b/benches/many_functions.rs
@@ -0,0 +1,84 @@
+use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
+use wasmer::*;
+use wasmer_engine_universal::UniversalExecutableRef;
+
+fn call_many_functions(n: usize) -> String {
+    let fndefs = (0..n)
+        .map(|idx| format!(r#"(func $fn{idx} return)"#, idx = idx))
+        .collect::<String>();
+    let calls = (0..n)
+        .map(|idx| format!("call $fn{idx}\n", idx = idx))
+        .collect::<String>();
+    format!(
+        r#"(module {fndefs} (func (export "main") {calls} return) (func (export "single") call $fn0 return))"#,
+        fndefs = fndefs,
+        calls = calls
+    )
+}
+
+fn nops(c: &mut Criterion) {
+    for size in [1, 10, 100, 1000, 10000] {
+        let wat = call_many_functions(size);
+        let store = Store::new(&Universal::new(Singlepass::new()).engine());
+        let mut compile = c.benchmark_group("compile");
+        compile.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| {
+            b.iter(|| {
+                let module = Module::new(&store, &wat).unwrap();
+                let imports = imports! {};
+                let _ = Instance::new(&module, &imports).unwrap();
+            })
+        });
+        drop(compile);
+
+        let module = Module::new(&store, &wat).unwrap();
+        let imports = imports!
{}; + let instance = Instance::new(&module, &imports).unwrap(); + let mut get_main = c.benchmark_group("get_main"); + get_main.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { + b.iter(|| { + let _: Function = instance.lookup_function("main").unwrap(); + }) + }); + drop(get_main); + let main: Function = instance.lookup_function("main").unwrap(); + let mut call_main = c.benchmark_group("call_main"); + call_main.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { + b.iter(|| { + black_box(main.call(&[]).unwrap()); + }) + }); + drop(call_main); + + let single: Function = instance.lookup_function("single").unwrap(); + let mut call_single = c.benchmark_group("call_single"); + call_single.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { + b.iter(|| { + black_box(single.call(&[]).unwrap()); + }) + }); + drop(call_single); + + let mut serialize = c.benchmark_group("serialize"); + let wasm = wat::parse_bytes(wat.as_ref()).unwrap(); + let executable = store.engine().compile(&wasm, store.tunables()).unwrap(); + serialize.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { + b.iter(|| { + black_box(executable.serialize().unwrap()); + }) + }); + drop(serialize); + + let serialized = executable.serialize().unwrap(); + let mut deserialize = c.benchmark_group("deserialize"); + deserialize.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { + b.iter(|| unsafe { + let deserialized = UniversalExecutableRef::deserialize(&serialized).unwrap(); + black_box(store.engine().load(&deserialized).unwrap()); + }) + }); + } +} + +criterion_group!(benches, nops); + +criterion_main!(benches); diff --git a/benches/static_and_dynamic_functions.rs b/benches/static_and_dynamic_functions.rs index 0db18069d6..b33d0a573e 100644 --- a/benches/static_and_dynamic_functions.rs +++ b/benches/static_and_dynamic_functions.rs @@ -147,6 +147,19 @@ pub fn run_basic_dynamic_function(store: &Store, compiler_name: &str, c: &mut Cr } fn run_static_benchmarks(_c: &mut Criterion) { + #[cfg(feature = "llvm")] + { + let store = Store::new(&Universal::new(wasmer_compiler_llvm::LLVM::new()).engine()); + run_basic_static_function(&store, "llvm", _c); + } + + #[cfg(feature = "cranelift")] + { + let store = + Store::new(&Universal::new(wasmer_compiler_cranelift::Cranelift::new()).engine()); + run_basic_static_function(&store, "cranelift", _c); + } + #[cfg(feature = "singlepass")] { let store = @@ -156,6 +169,19 @@ fn run_static_benchmarks(_c: &mut Criterion) { } fn run_dynamic_benchmarks(_c: &mut Criterion) { + #[cfg(feature = "llvm")] + { + let store = Store::new(&Universal::new(wasmer_compiler_llvm::LLVM::new()).engine()); + run_basic_dynamic_function(&store, "llvm", _c); + } + + #[cfg(feature = "cranelift")] + { + let store = + Store::new(&Universal::new(wasmer_compiler_cranelift::Cranelift::new()).engine()); + run_basic_dynamic_function(&store, "cranelift", _c); + } + #[cfg(feature = "singlepass")] { let store = diff --git a/deny.toml b/deny.toml index d03aaabff3..13a8000bbb 100644 --- a/deny.toml +++ b/deny.toml @@ -35,6 +35,9 @@ skip = [ { name = "gimli", version = "=0.25.0" }, { name = "semver", version = "=0.11.0" }, ] +skip-tree = [ + { name = "cranelift-frontend", version = "0.76.0" }, +] [sources] unknown-registry = "deny" diff --git a/docs/deps_dedup.dot b/docs/deps_dedup.dot index c9d98a470e..a9231d309f 100644 --- a/docs/deps_dedup.dot +++ b/docs/deps_dedup.dot @@ -14,7 +14,7 @@ digraph dependencies { n13 [label="wasmer-cache", color=orange]; n14 
[label="wasmer-cli", color=orange]; - + subgraph cluster_compiler { label="Compilers"; color=brown; @@ -40,7 +40,7 @@ digraph dependencies { n6; n7; } - + subgraph cluster_abi { label="Provided ABIs"; diff --git a/examples/README.md b/examples/README.md index ac0d667779..738a10a0a3 100644 --- a/examples/README.md +++ b/examples/README.md @@ -48,49 +48,49 @@ example. Execute the example ```shell - $ cargo run --example hello-world --release --features "singlepass" + $ cargo run --example hello-world --release --features "cranelift" ``` 2. [**Instantiating a module**][instance], explains the basics of using Wasmer and how to create an instance out of a Wasm module. - + _Keywords_: instance, module. - +
Execute the example ```shell - $ cargo run --example instance --release --features "singlepass" + $ cargo run --example instance --release --features "cranelift" ```
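   For reference, the boilerplate these examples share is small. The
   following is a minimal sketch of the pattern used throughout this
   directory (the Universal engine plus the Cranelift compiler),
   assuming the `cranelift` feature is enabled; the module and the
   `noop` export are placeholders:

   ```rust
   use wasmer::{imports, wat2wasm, Instance, Module, Store};
   use wasmer_compiler_cranelift::Cranelift;
   use wasmer_engine_universal::Universal;

   fn main() -> Result<(), Box<dyn std::error::Error>> {
       // A trivial module with a single exported function.
       let wasm_bytes = wat2wasm(br#"(module (func (export "noop")))"#)?;

       // Engine + compiler, exactly as in the examples of this directory.
       let store = Store::new(&Universal::new(Cranelift::default()).engine());
       let module = Module::new(&store, wasm_bytes)?;

       // This module needs no imports.
       let instance = Instance::new(&module, &imports! {})?;
       instance.exports.get_function("noop")?.call(&[])?;

       Ok(())
   }
   ```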
3. [**Handling errors**][errors], explains the basics of handling errors raised by a Wasm module. - + _Keywords_: instance, error. - +
Execute the example ```shell - $ cargo run --example errors --release --features "singlepass" + $ cargo run --example errors --release --features "cranelift" ```
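   A trap raised inside the guest surfaces on the host as a
   `RuntimeError`. A minimal sketch of that flow (the `div_by_zero`
   export here is a hypothetical stand-in, mirroring the `errors.rs`
   example updated later in this diff):

   ```rust
   use wasmer::{imports, wat2wasm, Instance, Module, RuntimeError, Store};
   use wasmer_compiler_cranelift::Cranelift;
   use wasmer_engine_universal::Universal;

   fn main() -> Result<(), Box<dyn std::error::Error>> {
       // A function that always traps with an integer division by zero.
       let wasm_bytes = wat2wasm(
           br#"(module
                 (func (export "div_by_zero") (result i32)
                   i32.const 4
                   i32.const 0
                   i32.div_s))"#,
       )?;

       let store = Store::new(&Universal::new(Cranelift::default()).engine());
       let instance = Instance::new(&Module::new(&store, wasm_bytes)?, &imports! {})?;

       // The call fails; the trap is reported as a `RuntimeError`.
       let result = instance.exports.get_function("div_by_zero")?.call(&[]);
       let error: RuntimeError = result.unwrap_err();
       println!("trap message: {}", error.message());

       Ok(())
   }
   ```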
4. [**Interacting with memory**][memory], explains the basics of interacting with Wasm module memory. - + _Keywords_: memory, module. - +
Execute the example ```shell - $ cargo run --example memory --release --features "singlepass" + $ cargo run --example memory --release --features "cranelift" ```
@@ -98,46 +98,46 @@ example. ### Exports 1. [**Exported global**][exported-global], explains how to work with - exported globals: get/set their value, have information about their + exported globals: get/set their value, query information about their type. - + _Keywords_: export, global.
Execute the example ```shell - $ cargo run --example exported-global --release --features "singlepass" + $ cargo run --example exported-global --release --features "cranelift" ```
- + 2. [**Exported function**][exported-function], explains how to get and how to call an exported function. They come in 2 flavors: dynamic, and “static”/native. The pros and cons are discussed briefly. - + _Keywords_: export, function, dynamic, static, native.
Execute the example ```shell - $ cargo run --example exported-function --release --features "singlepass" + $ cargo run --example exported-function --release --features "cranelift" ```
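   Concretely, the two flavors differ as sketched below, assuming an
   already-built `instance` whose module exports `sum: (i32, i32) -> i32`
   like the one used in these examples (the helper itself is
   hypothetical):

   ```rust
   use wasmer::{Function, Instance, NativeFunc, Value};

   // Hypothetical helper; `instance` is assumed to export `sum`.
   fn call_both_flavors(instance: &Instance) -> Result<(), Box<dyn std::error::Error>> {
       // Dynamic flavor: arguments and results are untyped `Value`s,
       // checked against the signature at call time.
       let sum: &Function = instance.exports.get_function("sum")?;
       let results = sum.call(&[Value::I32(1), Value::I32(2)])?;
       assert_eq!(results.to_vec(), vec![Value::I32(3)]);

       // "Static"/native flavor: typed on the Rust side, so calls are
       // both more convenient and cheaper.
       let sum_native: NativeFunc<(i32, i32), i32> = sum.native()?;
       assert_eq!(sum_native.call(1, 2)?, 3);

       Ok(())
   }
   ```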
-3. [**Exported memory**][exported-memory], explains how to read from +3. [**Exported memory**][exported-memory], explains how to read from and write to exported memory. - + _Keywords_: export, memory.
Execute the example ```shell - $ cargo run --example exported-memory --release --features "singlepass" + $ cargo run --example exported-memory --release --features "cranelift" ```
@@ -146,29 +146,29 @@ example. 1. [**Imported global**][imported-global], explains how to work with imported globals: create globals, import them, get/set their value. - + _Keywords_: import, global.
Execute the example ```shell - $ cargo run --example imported-global --release --features "singlepass" + $ cargo run --example imported-global --release --features "cranelift" ```
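   As a sketch of the host side (the WAT module and the `counter` name
   are illustrative, not the example's actual code), a mutable global
   created on the host can be handed to the module at instantiation
   and shared afterwards:

   ```rust
   use wasmer::{imports, wat2wasm, Global, Instance, Module, Store, Value};
   use wasmer_compiler_cranelift::Cranelift;
   use wasmer_engine_universal::Universal;

   fn main() -> Result<(), Box<dyn std::error::Error>> {
       // A module that simply re-exports the global it imports.
       let wasm_bytes = wat2wasm(
           br#"(module
                 (import "env" "counter" (global $counter (mut i32)))
                 (export "counter" (global $counter)))"#,
       )?;

       let store = Store::new(&Universal::new(Cranelift::default()).engine());
       let module = Module::new(&store, wasm_bytes)?;

       // Host-created mutable global, provided at instantiation time.
       let counter = Global::new_mut(&store, Value::I32(0));
       let import_object = imports! {
           "env" => { "counter" => counter.clone() },
       };
       let _instance = Instance::new(&module, &import_object)?;

       // Host and guest now observe the same global.
       counter.set(Value::I32(7))?;
       assert_eq!(counter.get(), Value::I32(7));

       Ok(())
   }
   ```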
-2. [**Imported function**][imported-function], explains how to define +2. [**Imported function**][imported-function], explains how to define an imported function. They come in 2 flavors: dynamic, and “static”/native. - + _Keywords_: import, function, dynamic, static, native.
Execute the example ```shell - $ cargo run --example imported-function --release --features "singlepass" + $ cargo run --example imported-function --release --features "cranelift" ```
@@ -183,11 +183,11 @@ example. Execute the example ```shell - $ cargo run --example table --release --features "singlepass" + $ cargo run --example table --release --features "cranelift" ``` - + 2. [**Memory**][memory], explains how to use Wasm Memories from the Wasmer API. Memory example is a work in progress. @@ -197,7 +197,7 @@ example. Execute the example ```shell - $ cargo run --example memory --release --features "singlepass" + $ cargo run --example memory --release --features "cranelift" ``` @@ -213,7 +213,7 @@ example. Execute the example ```shell - $ cargo run --example tunables-limit-memory --release --features "singlepass" + $ cargo run --example tunables-limit-memory --release --features "cranelift" ``` @@ -224,14 +224,14 @@ example. Universal engine is, and how to set it up. The example completes itself with the compilation of the Wasm module, its instantiation, and finally, by calling an exported function. - + _Keywords_: Universal, engine, in-memory, executable code. - +
Execute the example ```shell - $ cargo run --example engine-universal --release --features "singlepass" + $ cargo run --example engine-universal --release --features "cranelift" ```
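   Since the Universal engine keeps the executable code in memory, a
   compiled module can also be serialized and reloaded later without
   recompiling. A short sketch of that roundtrip (same caveat as in
   the headless example described below: deserializing arbitrary
   bytes is `unsafe`):

   ```rust
   use wasmer::{imports, wat2wasm, Instance, Module, Store};
   use wasmer_compiler_cranelift::Cranelift;
   use wasmer_engine_universal::Universal;

   fn main() -> Result<(), Box<dyn std::error::Error>> {
       let store = Store::new(&Universal::new(Cranelift::default()).engine());
       let wasm_bytes = wat2wasm(br#"(module (func (export "noop")))"#)?;
       let module = Module::new(&store, wasm_bytes)?;

       // The in-memory artifact can be serialized to plain bytes...
       let serialized = module.serialize()?;

       // ...and restored later without invoking the compiler again.
       // `deserialize` is unsafe: Wasmer cannot validate the input bytes.
       let module = unsafe { Module::deserialize(&store, serialized) }?;
       let _instance = Instance::new(&module, &imports! {})?;

       Ok(())
   }
   ```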
@@ -240,7 +240,7 @@ example. is, and how to set it up. The example completes itself with the compilation of the Wasm module, its instantiation, and finally, by calling an exported function. - + _Keywords_: native, engine, shared library, dynamic library, executable code. @@ -248,7 +248,7 @@ example. Execute the example ```shell - $ cargo run --example engine-dylib --release --features "singlepass" + $ cargo run --example engine-dylib --release --features "cranelift" ``` @@ -258,7 +258,7 @@ example. it. The example completes itself with the instantiation of a pre-compiled Wasm module, and finally, by calling an exported function. - + _Keywords_: native, engine, constrained environment, ahead-of-time compilation, cross-compilation, executable code, serialization. @@ -266,7 +266,7 @@ example. Execute the example ```shell - $ cargo run --example engine-headless --release --features "singlepass" + $ cargo run --example engine-headless --release --features "cranelift" ``` @@ -274,37 +274,37 @@ 4. [**Cross-compilation**][cross-compilation], illustrates the power of the abstraction over the engines and the compilers, making it possible to cross-compile a Wasm module for a custom target. - + _Keywords_: engine, compiler, cross-compilation.
Execute the example ```shell - $ cargo run --example cross-compilation --release --features "singlepass" + $ cargo run --example cross-compilation --release --features "cranelift" ```
- + 5. [**Features**][features], illustrates how to enable WebAssembly features that aren't yet stable. - + _Keywords_: engine, features. - +
Execute the example - + ```shell - $ cargo run --example features --release --features "singlepass" + $ cargo run --example features --release --features "cranelift" ``` - +
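   A sketch of how such a feature flag is switched on, using the
   multi-value proposal as the guinea pig (hedged: the
   `Universal::features` builder call assumes the upstream Wasmer 2.x
   API is available in this fork):

   ```rust
   use wasmer::{imports, wat2wasm, Features, Instance, Module, Store, Value};
   use wasmer_compiler_cranelift::Cranelift;
   use wasmer_engine_universal::Universal;

   fn main() -> Result<(), Box<dyn std::error::Error>> {
       // One call returning two values requires the multi-value proposal.
       let wasm_bytes = wat2wasm(
           br#"(module
                 (func (export "swap") (param i32 i64) (result i64 i32)
                   local.get 1
                   local.get 0))"#,
       )?;

       // Enable the feature on the engine before compiling.
       let mut features = Features::new();
       features.multi_value(true);
       let engine = Universal::new(Cranelift::default()).features(features).engine();

       let store = Store::new(&engine);
       let instance = Instance::new(&Module::new(&store, wasm_bytes)?, &imports! {})?;

       let results = instance
           .exports
           .get_function("swap")?
           .call(&[Value::I32(1), Value::I64(2)])?;
       assert_eq!(results.to_vec(), vec![Value::I64(2), Value::I32(1)]);

       Ok(())
   }
   ```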
### Compilers 1. [**Singlepass compiler**][compiler-singlepass], explains how to use the [`wasmer-compiler-singlepass`] compiler. - + _Keywords_: compiler, singlepass.
@@ -316,18 +316,46 @@ example.
+2. [**Cranelift compiler**][compiler-cranelift], explains how to use + the [`wasmer-compiler-cranelift`] compiler. + + _Keywords_: compiler, cranelift. + +
+ Execute the example + + ```shell + $ cargo run --example compiler-cranelift --release --features "cranelift" + ``` + +
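+   Beyond `Cranelift::default()`, the configuration exposes a few
+   knobs. A hedged sketch, assuming the upstream `CraneliftOptLevel`
+   and `canonicalize_nans` APIs are available in this fork:
+
+   ```rust
+   use wasmer::Store;
+   use wasmer_compiler_cranelift::{Cranelift, CraneliftOptLevel};
+   use wasmer_engine_universal::Universal;
+
+   fn main() {
+       // Trade compilation speed against code quality, and optionally
+       // canonicalize NaNs for deterministic floating-point results.
+       let mut compiler = Cranelift::new();
+       compiler
+           .opt_level(CraneliftOptLevel::SpeedAndSize)
+           .canonicalize_nans(true);
+
+       let _store = Store::new(&Universal::new(compiler).engine());
+   }
+   ```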
+ +3. [**LLVM compiler**][compiler-llvm], explains how to use the + [`wasmer-compiler-llvm`] compiler. + + _Keywords_: compiler, llvm. + +
+ Execute the example + + ```shell + $ cargo run --example compiler-llvm --release --features "llvm" + ``` + +
+ ### Integrations 1. [**WASI**][wasi], explains how to use the [WebAssembly System Interface][WASI] (WASI), i.e. the [`wasmer-wasi`] crate. - + _Keywords_: wasi, system, interface
Execute the example ```shell - $ cargo run --example wasi --release --features "singlepass,wasi" + $ cargo run --example wasi --release --features "cranelift,wasi" ```
@@ -341,7 +369,7 @@ example.
   Execute the example

   ```shell
-  $ cargo run --example wasi-pipes --release --features "singlepass,wasi"
+  $ cargo run --example wasi-pipes --release --features "cranelift,wasi"
   ```
@@ -351,6 +379,8 @@ example.
 [engine-dylib]: ./engine_dylib.rs
 [engine-headless]: ./engine_headless.rs
 [compiler-singlepass]: ./compiler_singlepass.rs
+[compiler-cranelift]: ./compiler_cranelift.rs
+[compiler-llvm]: ./compiler_llvm.rs
 [cross-compilation]: ./engine_cross_compilation.rs
 [exported-global]: ./exports_global.rs
 [exported-function]: ./exports_function.rs
@@ -366,5 +396,7 @@ example.
 [tunables-limit-memory]: ./tunables_limit_memory.rs
 [features]: ./features.rs
 [`wasmer-compiler-singlepass`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-singlepass
+[`wasmer-compiler-cranelift`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-cranelift
+[`wasmer-compiler-llvm`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-llvm
 [`wasmer-wasi`]: https://github.com/wasmerio/wasmer/tree/master/lib/wasi
 [WASI]: https://github.com/WebAssembly/WASI
diff --git a/examples/compiler_cranelift.rs b/examples/compiler_cranelift.rs
new file mode 100644
index 0000000000..2d7e446c8e
--- /dev/null
+++ b/examples/compiler_cranelift.rs
@@ -0,0 +1,66 @@
+//! A Wasm module can be compiled with multiple compilers.
+//!
+//! This example illustrates how to use the Cranelift compiler.
+//!
+//! You can run the example directly by executing in Wasmer root:
+//!
+//! ```shell
+//! cargo run --example compiler-cranelift --release --features "cranelift"
+//! ```
+//!
+//! Ready?
+
+use wasmer::{imports, wat2wasm, Instance, Module, Store, Value};
+use wasmer_compiler_cranelift::Cranelift;
+use wasmer_engine_universal::Universal;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Let's declare the Wasm module with the text representation.
+    let wasm_bytes = wat2wasm(
+        r#"
+(module
+  (type $sum_t (func (param i32 i32) (result i32)))
+  (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32)
+    local.get $x
+    local.get $y
+    i32.add)
+  (export "sum" (func $sum_f)))
+"#
+        .as_bytes(),
+    )?;
+
+    // Use Cranelift compiler with the default settings
+    let compiler = Cranelift::default();
+
+    // Create the store
+    let store = Store::new(&Universal::new(compiler).engine());
+
+    println!("Compiling module...");
+    // Let's compile the Wasm module.
+    let module = Module::new(&store, wasm_bytes)?;
+
+    // Create an empty import object.
+    let import_object = imports! {};
+
+    println!("Instantiating module...");
+    // Let's instantiate the Wasm module.
+    let instance = Instance::new(&module, &import_object)?;
+
+    let sum = instance.exports.get_function("sum")?;
+
+    println!("Calling `sum` function...");
+    // Let's call the `sum` exported function. The parameters are a
+    // slice of `Value`s. The results are a boxed slice of `Value`s.
+    let results = sum.call(&[Value::I32(1), Value::I32(2)])?;
+
+    println!("Results: {:?}", results);
+    assert_eq!(results.to_vec(), vec![Value::I32(3)]);
+
+    Ok(())
+}
+
+#[test]
+#[cfg(feature = "cranelift")]
+fn test_compiler_cranelift() -> Result<(), Box<dyn std::error::Error>> {
+    main()
+}
diff --git a/examples/compiler_llvm.rs b/examples/compiler_llvm.rs
new file mode 100644
index 0000000000..f062a1c0bc
--- /dev/null
+++ b/examples/compiler_llvm.rs
@@ -0,0 +1,66 @@
+//! A Wasm module can be compiled with multiple compilers.
+//!
+//! This example illustrates how to use the LLVM compiler.
+//!
+//! You can run the example directly by executing in Wasmer root:
+//!
+//! ```shell
+//!
cargo run --example compiler-llvm --release --features "llvm"
+//! ```
+//!
+//! Ready?
+
+use wasmer::{imports, wat2wasm, Instance, Module, Store, Value};
+use wasmer_compiler_llvm::LLVM;
+use wasmer_engine_universal::Universal;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Let's declare the Wasm module with the text representation.
+    let wasm_bytes = wat2wasm(
+        r#"
+(module
+  (type $sum_t (func (param i32 i32) (result i32)))
+  (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32)
+    local.get $x
+    local.get $y
+    i32.add)
+  (export "sum" (func $sum_f)))
+"#
+        .as_bytes(),
+    )?;
+
+    // Use LLVM compiler with the default settings
+    let compiler = LLVM::default();
+
+    // Create the store
+    let store = Store::new(&Universal::new(compiler).engine());
+
+    println!("Compiling module...");
+    // Let's compile the Wasm module.
+    let module = Module::new(&store, wasm_bytes)?;
+
+    // Create an empty import object.
+    let import_object = imports! {};
+
+    println!("Instantiating module...");
+    // Let's instantiate the Wasm module.
+    let instance = Instance::new(&module, &import_object)?;
+
+    let sum = instance.exports.get_function("sum")?;
+
+    println!("Calling `sum` function...");
+    // Let's call the `sum` exported function. The parameters are a
+    // slice of `Value`s. The results are a boxed slice of `Value`s.
+    let results = sum.call(&[Value::I32(1), Value::I32(2)])?;
+
+    println!("Results: {:?}", results);
+    assert_eq!(results.to_vec(), vec![Value::I32(3)]);
+
+    Ok(())
+}
+
+#[test]
+#[cfg(feature = "llvm")]
+fn test_compiler_llvm() -> Result<(), Box<dyn std::error::Error>> {
+    main()
+}
diff --git a/examples/coremark.rs b/examples/coremark.rs
deleted file mode 100644
index c71067aae9..0000000000
--- a/examples/coremark.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-//! Benchmark the codegen quality with a compute benchmark.
-//!
-//! ```shell
-//! cargo run --example coremark --release --features "singlepass"
-//! ```
-
-use wasmer::{imports, Function, FunctionType, Instance, Module, Store, Type, Value};
-use wasmer_compiler_singlepass::Singlepass;
-use wasmer_engine_universal::Universal;
-
-fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let wasm_bytes = include_bytes!("coremark.wasm");
-    let compiler = Singlepass::default();
-    let store = Store::new(&Universal::new(compiler).engine());
-    println!("Compiling module...");
-    let module = Module::new(&store, wasm_bytes)?;
-    let clock_ms_sig = FunctionType::new(vec![], vec![Type::I64]);
-    let start = std::time::Instant::now();
-    let import_object = imports! {
-        "env" => {
-            "clock_ms" => Function::new(&store, clock_ms_sig, move |_| {
-                Ok(vec![Value::I64(start.elapsed().as_millis() as u64 as i64)])
-            })
-        }
-    };
-    println!("Instantiating module...");
-    let instance = Instance::new(&module, &import_object)?;
-    let run = instance.lookup_function("run").expect("function lookup");
-    println!("Calling CoreMark 1.0. Should take 12~20 seconds...");
-    let results = run.call(&[])?;
-    println!("Score: {:?}", results);
-    Ok(())
-}
-
-#[test]
-#[cfg(feature = "singlepass")]
-fn test_compiler_singlepass() -> Result<(), Box<dyn std::error::Error>> {
-    main()
-}
diff --git a/examples/coremark.wasm b/examples/coremark.wasm
deleted file mode 100644
index c5d6b878d5..0000000000
Binary files a/examples/coremark.wasm and /dev/null differ
diff --git a/examples/early_exit.rs b/examples/early_exit.rs
index 345836e417..71d7ef82cb 100644
--- a/examples/early_exit.rs
+++ b/examples/early_exit.rs
@@ -9,7 +9,7 @@
 //! You can run the example directly by executing in Wasmer root:
 //!
 //! ```shell
-//!
cargo run --example early-exit --release --features "singlepass"
+//! cargo run --example early-exit --release --features "cranelift"
 //! ```
 //!
 //! Ready?
@@ -17,7 +17,7 @@
 use anyhow::bail;
 use std::fmt;
 use wasmer::{imports, wat2wasm, Function, Instance, Module, NativeFunc, RuntimeError, Store};
-use wasmer_compiler_singlepass::Singlepass;
+use wasmer_compiler_cranelift::Cranelift;
 use wasmer_engine_universal::Universal;
 
 // First we need to create an error type that we'll use to signal the end of execution.
@@ -55,7 +55,7 @@ fn main() -> anyhow::Result<()> {
     // Note that we don't need to specify the engine/compiler if we want to use
     // the default provided by Wasmer.
     // You can use `Store::default()` for that.
-    let store = Store::new(&Universal::new(Singlepass::default()).engine());
+    let store = Store::new(&Universal::new(Cranelift::default()).engine());
 
     println!("Compiling module...");
     // Let's compile the Wasm module.
@@ -82,10 +82,7 @@ fn main() -> anyhow::Result<()> {
     //
     // Get the `run` function which we'll use as our entrypoint.
     println!("Calling `run` function...");
-    let run_func: NativeFunc<(i32, i32), i32> = instance
-        .lookup_function("run")
-        .ok_or(anyhow::anyhow!("could not find `run` export"))?
-        .native()?;
+    let run_func: NativeFunc<(i32, i32), i32> = instance.exports.get_native_function("run")?;
 
     // When we call a function it can either succeed or fail. We expect it to fail.
     match run_func.call(1, 7) {
diff --git a/examples/engine_cross_compilation.rs b/examples/engine_cross_compilation.rs
new file mode 100644
index 0000000000..8b575cd6fa
--- /dev/null
+++ b/examples/engine_cross_compilation.rs
@@ -0,0 +1,111 @@
+//! Defining an engine in Wasmer is one of the fundamental steps.
+//!
+//! As a reminder, an engine applies roughly 2 steps:
+//!
+//! 1. It compiles the Wasm module bytes to executable code, through
+//!    the intervention of a compiler,
+//! 2. It stores the executable code somewhere.
+//!
+//! This example focuses on the first step: the compiler. It
+//! illustrates how the abstraction over the compiler is so powerful
+//! that it is possible to cross-compile a Wasm module.
+//!
+//! You can run the example directly by executing in Wasmer root:
+//!
+//! ```shell
+//! cargo run --example cross-compilation --release --features "cranelift"
+//! ```
+//!
+//! Ready?
+
+use std::str::FromStr;
+use wasmer::{wat2wasm, Module, RuntimeError, Store};
+use wasmer_compiler::{CpuFeature, Target, Triple};
+use wasmer_compiler_cranelift::Cranelift;
+use wasmer_engine_dylib::Dylib;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Let's declare the Wasm module with the text representation.
+    let wasm_bytes = wat2wasm(
+        br#"
+(module
+  (type $sum_t (func (param i32 i32) (result i32)))
+  (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32)
+    local.get $x
+    local.get $y
+    i32.add)
+  (export "sum" (func $sum_f)))
+"#,
+    )?;
+
+    // Define a compiler configuration.
+    //
+    // In this situation, the compiler is
+    // `wasmer_compiler_cranelift`. The compiler is responsible for
+    // compiling the Wasm module into executable code.
+    let compiler_config = Cranelift::default();
+
+    // Here we go.
+    //
+    // Let's define the target “triple”. Historically, such things had
+    // three fields, though additional fields have been added over
+    // time.
+    let triple = Triple::from_str("x86_64-linux-musl")
+        .map_err(|error| RuntimeError::new(error.to_string()))?;
+
+    // Here we go again.
+    //
+    // Let's define a CPU feature.
+    let mut cpu_feature = CpuFeature::set();
+    cpu_feature.insert(CpuFeature::from_str("sse2")?);
+
+    // Here we go finally.
+    //
+    // Let's build the target.
+    let target = Target::new(triple, cpu_feature);
+    println!("Chosen target: {:?}", target);
+
+    // Define the engine that will drive everything.
+    //
+    // In this case, the engine is `wasmer_engine_dylib` which means
+    // that a shared object is going to be generated.
+    //
+    // That's where we specify the target for the compiler.
+    //
+    // Use the Dylib engine.
+    let engine = Dylib::new(compiler_config)
+        // Here we go.
+        // Pass the target to the engine! The engine will share
+        // this information with the compiler.
+        .target(target)
+        // Get the engine.
+        .engine();
+
+    // Create a store, that holds the engine.
+    let store = Store::new(&engine);
+
+    println!("Compiling module...");
+    // Let's compile the Wasm module.
+    let _module = Module::new(&store, wasm_bytes)?;
+
+    println!("Module compiled successfully.");
+
+    // Congrats, the Wasm module is cross-compiled!
+    //
+    // What to do with that? It is possible to use an engine (probably
+    // a headless engine) to execute the cross-compiled Wasm module on
+    // the targeted platform.
+
+    Ok(())
+}
+
+#[test]
+#[cfg(not(any(
+    windows,
+    // We don't yet support cross-compilation on macOS
+    all(target_os = "macos"),
+    target_env = "musl",
+)))]
+fn test_cross_compilation() -> Result<(), Box<dyn std::error::Error>> {
+    main()
+}
diff --git a/examples/engine_headless.rs b/examples/engine_headless.rs
new file mode 100644
index 0000000000..41f8b3fcde
--- /dev/null
+++ b/examples/engine_headless.rs
@@ -0,0 +1,154 @@
+//! Defining an engine in Wasmer is one of the fundamental steps.
+//!
+//! This example illustrates a neat feature of engines: their ability
+//! to run in a headless mode. At the time of writing, all engines
+//! have a headless mode, but it's not a requirement of the `Engine`
+//! trait (defined in the `wasmer_engine` crate).
+//!
+//! What problem does it solve, and what does it mean?
+//!
+//! Once a Wasm module is compiled into executable code and stored
+//! somewhere (e.g. in memory with the Universal engine, or in a
+//! shared object file with the Dylib engine), the module can be
+//! instantiated and executed. But imagine for a second the following
+//! scenario:
+//!
+//!   * Modules are compiled ahead of time, to be instantiated later
+//!     on.
+//!   * Modules are cross-compiled on a machine ahead of time
+//!     to be run on another machine later on.
+//!
+//! In both scenarios, the environment where the compiled Wasm module
+//! will be executed can be very constrained. For such particular
+//! contexts, Wasmer can be compiled _without_ the compilers, so that
+//! the `wasmer` binary is as small as possible. Indeed, there is no
+//! need for a compiler since the Wasm module is already compiled. All
+//! we need is an engine that _only_ drives the instantiation and
+//! execution of the Wasm module.
+//!
+//! And that, that's a headless engine.
+//!
+//! To achieve such a scenario, a Wasm module must be compiled, then
+//! serialized —for example into a file—, then later, potentially on
+//! another machine, deserialized. The next steps are classical: The
+//! Wasm module is instantiated and executed.
+//!
+//! This example uses a `compiler` because it illustrates the entire
+//! workflow, but keep in mind the compiler isn't required after the
+//! compilation step.
+//!
+//! You can run the example directly by executing in Wasmer root:
+//!
+//! ```shell
+//!
cargo run --example engine-headless --release --features "cranelift"
+//! ```
+//!
+//! Ready?
+
+use tempfile::NamedTempFile;
+use wasmer::imports;
+use wasmer::wat2wasm;
+use wasmer::Instance;
+use wasmer::Module;
+use wasmer::Store;
+use wasmer::Value;
+use wasmer_compiler_cranelift::Cranelift;
+use wasmer_engine_dylib::Dylib;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // First step, let's compile the Wasm module and serialize it.
+    // Note: we need a compiler here.
+    let serialized_module_file = {
+        // Let's declare the Wasm module with the text representation.
+        let wasm_bytes = wat2wasm(
+            r#"
+(module
+  (type $sum_t (func (param i32 i32) (result i32)))
+  (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32)
+    local.get $x
+    local.get $y
+    i32.add)
+  (export "sum" (func $sum_f)))
+"#
+            .as_bytes(),
+        )?;
+
+        // Define a compiler configuration.
+        //
+        // In this situation, the compiler is
+        // `wasmer_compiler_cranelift`. The compiler is responsible for
+        // compiling the Wasm module into executable code.
+        let compiler_config = Cranelift::default();
+
+        println!("Creating Dylib engine...");
+        // Define the engine that will drive everything.
+        //
+        // In this case, the engine is `wasmer_engine_dylib` which
+        // means that a shared object is going to be generated. So
+        // when we are going to serialize the compiled Wasm module, we
+        // are going to store it in a file with the `.so` extension
+        // for example (or `.dylib`, or `.dll` depending on the
+        // platform).
+        let engine = Dylib::new(compiler_config).engine();
+
+        // Create a store, that holds the engine.
+        let store = Store::new(&engine);
+
+        println!("Compiling module...");
+        // Let's compile the Wasm module.
+        let module = Module::new(&store, wasm_bytes)?;
+
+        println!("Serializing module...");
+        // Here we go. Let's serialize the compiled Wasm module in a
+        // file.
+        let serialized_module_file = NamedTempFile::new()?;
+        module.serialize_to_file(&serialized_module_file)?;
+
+        serialized_module_file
+    };
+
+    // Second step, deserialize the compiled Wasm module, and execute
+    // it, for example with Wasmer without a compiler.
+    {
+        println!("Creating headless Dylib engine...");
+        // We create a headless Dylib engine.
+        let engine = Dylib::headless().engine();
+        let store = Store::new(&engine);
+
+        println!("Deserializing module...");
+        // Here we go.
+        //
+        // Deserialize the compiled Wasm module. This code is unsafe
+        // because Wasmer can't assert the bytes are valid (see the
+        // `wasmer::Module::deserialize`'s documentation to learn
+        // more).
+        let module = unsafe { Module::deserialize_from_file(&store, serialized_module_file) }?;
+
+        // Congrats, the Wasm module has been deserialized! Now let's
+        // execute it for the sake of having a complete example.
+
+        // Create an import object. Since our Wasm module didn't declare
+        // any imports, it's an empty object.
+        let import_object = imports! {};
+
+        println!("Instantiating module...");
+        // Let's instantiate the Wasm module.
+        let instance = Instance::new(&module, &import_object)?;
+
+        println!("Calling `sum` function...");
+        // The Wasm module exports a function called `sum`.
+        let sum = instance.exports.get_function("sum")?;
+        let results = sum.call(&[Value::I32(1), Value::I32(2)])?;
+
+        println!("Results: {:?}", results);
+        assert_eq!(results.to_vec(), vec![Value::I32(3)]);
+    }
+
+    Ok(())
+}
+
+#[test]
+#[cfg(not(any(windows, target_arch = "aarch64", target_env = "musl")))]
+fn test_engine_headless() -> Result<(), Box<dyn std::error::Error>> {
+    main()
+}
diff --git a/examples/engine_universal.rs b/examples/engine_universal.rs
index c9c88d8c35..0d51244f9e 100644
--- a/examples/engine_universal.rs
+++ b/examples/engine_universal.rs
@@ -13,13 +13,13 @@
 //! You can run the example directly by executing in Wasmer root:
 //!
 //! ```shell
-//! cargo run --example engine-universal --release --features "singlepass"
+//! cargo run --example engine-universal --release --features "cranelift"
 //! ```
 //!
 //! Ready?
 
 use wasmer::{imports, wat2wasm, Instance, Module, Store, Value};
-use wasmer_compiler_singlepass::Singlepass;
+use wasmer_compiler_cranelift::Cranelift;
 use wasmer_engine_universal::Universal;
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
@@ -40,9 +40,9 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // Define a compiler configuration.
     //
     // In this situation, the compiler is
-    // `wasmer_compiler_singlepass`. The compiler is responsible to
+    // `wasmer_compiler_cranelift`. The compiler is responsible to
     // compile the Wasm module into executable code.
-    let compiler_config = Singlepass::default();
+    let compiler_config = Cranelift::default();
 
     println!("Creating Universal engine...");
     // Define the engine that will drive everything.
@@ -76,9 +76,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     println!("Calling `sum` function...");
     // The Wasm module exports a function called `sum`.
-    let sum = instance
-        .lookup_function("sum")
-        .ok_or("could not find `sum` export")?
+    let sum = instance.exports.get_function("sum")?;
     let results = sum.call(&[Value::I32(1), Value::I32(2)])?;
 
     println!("Results: {:?}", results);
diff --git a/examples/errors.rs b/examples/errors.rs
index 4bbac7eb6b..d145fd776e 100644
--- a/examples/errors.rs
+++ b/examples/errors.rs
@@ -8,13 +8,13 @@
 //! You can run the example directly by executing in Wasmer root:
 //!
 //! ```shell
-//! cargo run --example errors --release --features "singlepass"
+//! cargo run --example errors --release --features "cranelift"
 //! ```
 //!
 //! Ready?
 
 use wasmer::{imports, wat2wasm, Instance, Module, Store};
-use wasmer_compiler_singlepass::Singlepass;
+use wasmer_compiler_cranelift::Cranelift;
 use wasmer_engine_universal::Universal;
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
@@ -39,7 +39,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // Note that we don't need to specify the engine/compiler if we want to use
     // the default provided by Wasmer.
     // You can use `Store::default()` for that.
-    let store = Store::new(&Universal::new(Singlepass::default()).engine());
+    let store = Store::new(&Universal::new(Cranelift::default()).engine());
 
     println!("Compiling module...");
     // Let's compile the Wasm module.
@@ -60,8 +60,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     //
     // Let's get it.
     let div_by_zero = instance
-        .lookup_function("div_by_zero")
-        .ok_or("could not find `div_by_zero` export")?
+        .exports
+        .get_function("div_by_zero")?
         .native::<(), i32>()?;
 
     println!("Calling `div_by_zero` function...");
diff --git a/examples/exports_function.rs b/examples/exports_function.rs
index cc8b0d5e14..07ce4a0d67 100644
--- a/examples/exports_function.rs
+++ b/examples/exports_function.rs
@@ -12,13 +12,13 @@
 //! You can run the example directly by executing in Wasmer root:
 //!
 //! ```shell
-//!
cargo run --example exported-function --release --features "singlepass" +//! cargo run --example exported-function --release --features "cranelift" //! ``` //! //! Ready? use wasmer::{imports, wat2wasm, Instance, Module, Store, Value}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -40,7 +40,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -67,9 +67,7 @@ fn main() -> Result<(), Box> { // ``` // get::(name)`. // ``` - let sum = instance - .lookup_function("sum") - .ok_or("could not find `sum` export")?; + let sum = instance.exports.get_function("sum")?; println!("Calling `sum` function..."); // Let's call the `sum` exported function. The parameters are a diff --git a/examples/exports_global.rs b/examples/exports_global.rs index 44900d398c..80aad13302 100644 --- a/examples/exports_global.rs +++ b/examples/exports_global.rs @@ -10,13 +10,13 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example exported-global --release --features "singlepass" +//! cargo run --example exported-global --release --features "cranelift" //! ``` //! //! Ready? use wasmer::{imports, wat2wasm, Instance, Module, Mutability, Store, Type, Value}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -38,7 +38,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -65,19 +65,13 @@ fn main() -> Result<(), Box> { // ``` // get::(name)`. // ``` - let one = match instance.lookup("one") { - Some(wasmer::Export::Global(g)) => g, - _ => return Err("could not find `one` export as a global".into()), - }; - let some = match instance.lookup("some") { - Some(wasmer::Export::Global(g)) => g, - _ => return Err("could not find `some` export as a global".into()), - }; + let one = instance.exports.get_global("one")?; + let some = instance.exports.get_global("some")?; println!("Getting globals types information..."); // Let's get the globals types. The results are `GlobalType`s. - let one_type = one.from.ty(); - let some_type = some.from.ty(); + let one_type = one.ty(); + let some_type = some.ty(); println!("`one` type: {:?} {:?}", one_type.mutability, one_type.ty); assert_eq!(one_type.mutability, Mutability::Const); @@ -95,12 +89,12 @@ fn main() -> Result<(), Box> { // We will use an exported function for the `one` global // and the Global API for `some`. let get_one = instance - .lookup_function("get_one") - .ok_or("could not find `get_one` export")? + .exports + .get_function("get_one")? 
.native::<(), f32>()?; let one_value = get_one.call()?; - let some_value = some.from.get(&store); + let some_value = some.get(); println!("`one` value: {:?}", one_value); assert_eq!(one_value, 1.0); @@ -111,13 +105,13 @@ fn main() -> Result<(), Box> { println!("Setting global values..."); // Trying to set the value of a immutable global (`const`) // will result in a `RuntimeError`. - let result = unsafe { one.from.set(Value::F32(42.0)) }; + let result = one.set(Value::F32(42.0)); assert_eq!( - result.expect_err("Expected an error").to_string(), + result.expect_err("Expected an error").message(), "Attempted to set an immutable global" ); - let one_result = one.from.get(&store); + let one_result = one.get(); println!("`one` value after `set`: {:?}", one_result); assert_eq!(one_result, Value::F32(1.0)); @@ -127,18 +121,16 @@ fn main() -> Result<(), Box> { // // We will use both for the `some` global. let set_some = instance - .lookup_function("set_some") - .ok_or("could not find `set_some` export")? + .exports + .get_function("set_some")? .native::()?; set_some.call(21.0)?; - let some_result = some.from.get(&store); + let some_result = some.get(); println!("`some` value after `set_some`: {:?}", some_result); assert_eq!(some_result, Value::F32(21.0)); - unsafe { - some.from.set(Value::F32(42.0))?; - } - let some_result = some.from.get(&store); + some.set(Value::F32(42.0))?; + let some_result = some.get(); println!("`some` value after `set`: {:?}", some_result); assert_eq!(some_result, Value::F32(42.0)); diff --git a/examples/exports_memory.rs b/examples/exports_memory.rs index 5ec21ffb35..66d25a2333 100644 --- a/examples/exports_memory.rs +++ b/examples/exports_memory.rs @@ -6,13 +6,13 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example exported-memory --release --features "singlepass" +//! cargo run --example exported-memory --release --features "cranelift" //! ``` //! //! Ready? use wasmer::{imports, wat2wasm, Array, Instance, Module, Store, WasmPtr}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -25,9 +25,8 @@ fn main() -> Result<(), Box> { (global $offset i32 (i32.const 42)) (global $length (mut i32) (i32.const 13)) - (func (export "load_offset") (result i32) - global.get $offset) - (func (export "load_length") (result i32) + (func (export "load") (result i32 i32) + global.get $offset global.get $length) (data (global.get $offset) "Hello, World!")) @@ -38,7 +37,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -51,23 +50,14 @@ fn main() -> Result<(), Box> { // Let's instantiate the Wasm module. let instance = Instance::new(&module, &import_object)?; - let load_offset = instance - .lookup_function("load_offset") - .ok_or("could not find `load_offset` export")? - .native::<(), WasmPtr>()?; - let load_length = instance - .lookup_function("load_length") - .ok_or("could not find `load_length` export")? - .native::<(), i32>()?; + let load = instance + .exports + .get_native_function::<(), (WasmPtr, i32)>("load")?; // Here we go. 
//
     // The Wasm module exports a memory under "mem". Let's get it.
-    let memory = match instance.lookup("mem") {
-        Some(wasmer::Export::Memory(m)) => m,
-        _ => return Err("could not find `mem` as an exported memory".into()),
-    };
-    let memory = wasmer::Memory::from_vmmemory(&store, memory);
+    let memory = instance.exports.get_memory("mem")?;
 
     // Now that we have the exported memory, let's get some
     // information about it.
@@ -87,16 +77,15 @@
     //
     // Fortunately, the Wasm module exports a `load` function
     // which will tell us the offset and length of the string.
-    let ptr = load_offset.call()?;
+    let (ptr, length) = load.call()?;
     println!("String offset: {:?}", ptr.offset());
-    let length = load_length.call()?;
     println!("String length: {:?}", length);
 
     // We now know where to find our string, let's read it.
     //
     // We will get bytes out of the memory so we need to
     // decode them into a string.
-    let str = ptr.get_utf8_string(&memory, length as u32).unwrap();
+    let str = ptr.get_utf8_string(memory, length as u32).unwrap();
     println!("Memory contents: {:?}", str);
 
     // What about changing the contents of the memory with a more
@@ -105,7 +94,7 @@
     // To do that, we'll dereference our pointer and change the content
     // of each `Cell`
     let new_str = b"Hello, Wasmer!";
-    let values = ptr.deref(&memory, 0, new_str.len() as u32).unwrap();
+    let values = ptr.deref(memory, 0, new_str.len() as u32).unwrap();
     for i in 0..new_str.len() {
         values[i].set(new_str[i]);
     }
@@ -117,7 +106,7 @@
     // before.
     println!("New string length: {:?}", new_str.len());
 
-    let str = ptr.get_utf8_string(&memory, new_str.len() as u32).unwrap();
+    let str = ptr.get_utf8_string(memory, new_str.len() as u32).unwrap();
     println!("New memory contents: {:?}", str);
 
     // Much better, don't you think?
diff --git a/examples/features.rs b/examples/features.rs
index 209436f962..ef3574d6ef 100644
--- a/examples/features.rs
+++ b/examples/features.rs
@@ -5,13 +5,13 @@
 //! You can run the example directly by executing in Wasmer root:
 //!
 //! ```shell
-//! cargo run --example features --release --features "singlepass"
+//! cargo run --example features --release --features "cranelift"
 //! ```
 //!
 //! Ready?
 
 use wasmer::{imports, wat2wasm, Features, Instance, Module, Store, Value};
-use wasmer_compiler_singlepass::Singlepass;
+use wasmer_compiler_cranelift::Cranelift;
 use wasmer_engine_universal::Universal;
 
 fn main() -> anyhow::Result<()> {
@@ -28,7 +28,7 @@
     // Set up the compiler.
-    let compiler = Singlepass::default();
+    let compiler = Cranelift::default();
 
     // Let's declare the features.
     let mut features = Features::new();
@@ -46,9 +46,7 @@
     // :-).
     let import_object = imports! {};
     let instance = Instance::new(&module, &import_object)?;
-    let swap = instance
-        .lookup_function("swap")
-        .ok_or(anyhow::anyhow!("could not find `swap` export"))?;
+    let swap = instance.exports.get_function("swap")?;
 
     let results = swap.call(&[Value::I32(1), Value::I64(2)])?;
 
diff --git a/examples/hello_world.rs b/examples/hello_world.rs
index b8f9845c85..c2c34913b6 100644
--- a/examples/hello_world.rs
+++ b/examples/hello_world.rs
@@ -3,11 +3,11 @@
 //! You can run the example directly by executing the following in the Wasmer root:
 //!
 //! ```shell
-//! cargo run --example hello-world --release --features "singlepass"
+//! cargo run --example hello-world --release --features "cranelift"
 //!
``` use wasmer::{imports, wat2wasm, Function, Instance, Module, NativeFunc, Store}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> anyhow::Result<()> { @@ -42,9 +42,9 @@ fn main() -> anyhow::Result<()> { // You can use `Store::default()` for that. // // However for the purposes of showing what's happening, we create a compiler - // (`Singlepass`) and pass it to an engine (`Universal`). We then pass the engine to + // (`Cranelift`) and pass it to an engine (`Universal`). We then pass the engine to // the store and are now ready to compile and run WebAssembly! - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); // We then use our store and Wasm bytes to compile a `Module`. // A `Module` is a compiled WebAssembly module that isn't ready to execute yet. @@ -77,10 +77,7 @@ fn main() -> anyhow::Result<()> { // // Recall that the Wasm module exported a function named "run", this is getting // that exported function from the `Instance`. - let run_func: NativeFunc<(), ()> = instance - .lookup_function("run") - .ok_or(anyhow::anyhow!("could not find `run` export"))? - .native()?; + let run_func: NativeFunc<(), ()> = instance.exports.get_native_function("run")?; // Finally, we call our exported Wasm function which will call our "say_hello" // function and return. diff --git a/examples/imports_exports.rs b/examples/imports_exports.rs index 23167fbd2b..a832ed8b8d 100644 --- a/examples/imports_exports.rs +++ b/examples/imports_exports.rs @@ -10,15 +10,16 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example imports-exports --release --features "singlepass" +//! cargo run --example imports-exports --release --features "cranelift" //! ``` //! //! Ready? use wasmer::{ - imports, wat2wasm, Function, FunctionType, Global, Instance, Module, Store, Type, Value, + imports, wat2wasm, Function, FunctionType, Global, Instance, Memory, Module, Store, Table, + Type, Value, }; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -43,7 +44,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -100,20 +101,20 @@ fn main() -> Result<(), Box> { // // Let's get them. 
println!("Getting the exported function..."); - let function = instance.lookup("guest_function"); - println!("Got exported function: {:?}", function); + let function = instance.exports.get::("guest_function")?; + println!("Got exported function of type: {:?}", function.ty()); println!("Getting the exported global..."); - let global = instance.lookup("guest_global"); - println!("Got exported global: {:?}", global); + let global = instance.exports.get::("guest_global")?; + println!("Got exported global of type: {:?}", global.ty()); println!("Getting the exported memory..."); - let memory = instance.lookup("guest_memory"); - println!("Got exported memory: {:?}", memory); + let memory = instance.exports.get::("guest_memory")?; + println!("Got exported memory of type: {:?}", memory.ty()); println!("Getting the exported table..."); - let table = instance.lookup("guest_table"); - println!("Got exported table: {:?}", table); + let table = instance.exports.get::("guest_table")?; + println!("Got exported table of type: {:?}", table.ty()); Ok(()) } diff --git a/examples/imports_function.rs b/examples/imports_function.rs index 544bafdbfb..767e0bdd57 100644 --- a/examples/imports_function.rs +++ b/examples/imports_function.rs @@ -12,13 +12,13 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example imported-function --release --features "singlepass" +//! cargo run --example imported-function --release --features "cranelift" //! ``` //! //! Ready? use wasmer::{imports, wat2wasm, Function, FunctionType, Instance, Module, Store, Type, Value}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -42,7 +42,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -86,8 +86,8 @@ fn main() -> Result<(), Box> { // // The Wasm module exports a function called `sum`. Let's get it. let sum = instance - .lookup_function("sum") - .ok_or("could not find `sum` export")? + .exports + .get_function("sum")? .native::<(i32, i32), i32>()?; println!("Calling `sum` function..."); diff --git a/examples/imports_function_env.rs b/examples/imports_function_env.rs index 49ec23621d..0cb4b8ffec 100644 --- a/examples/imports_function_env.rs +++ b/examples/imports_function_env.rs @@ -14,14 +14,14 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example imported-function-env --release --features "singlepass" +//! cargo run --example imported-function-env --release --features "cranelift" //! ``` //! //! Ready? use std::sync::{Arc, Mutex}; use wasmer::{imports, wat2wasm, Function, Instance, Module, Store, WasmerEnv}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -49,7 +49,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. 
- let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -69,11 +69,10 @@ fn main() -> Result<(), Box> { // possible to know the size of the `Env` at compile time (i.e it has to // implement the `Sized` trait) and that it implement the `WasmerEnv` trait. // We derive a default implementation of `WasmerEnv` here. - #[derive(Clone)] + #[derive(WasmerEnv, Clone)] struct Env { counter: Arc>, } - impl WasmerEnv for Env {} // Create the functions fn get_counter(env: &Env) -> i32 { @@ -102,8 +101,8 @@ fn main() -> Result<(), Box> { // // The Wasm module exports a function called `increment_counter_loop`. Let's get it. let increment_counter_loop = instance - .lookup_function("increment_counter_loop") - .ok_or("could not find `increment_counter_loop` export")? + .exports + .get_function("increment_counter_loop")? .native::()?; let counter_value: i32 = *shared_counter.lock().unwrap(); diff --git a/examples/imports_global.rs b/examples/imports_global.rs index d6a38c58cd..6da88c1d8c 100644 --- a/examples/imports_global.rs +++ b/examples/imports_global.rs @@ -10,13 +10,13 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example imported-global --release --features "singlepass" +//! cargo run --example imported-global --release --features "cranelift" //! ``` //! //! Ready? use wasmer::{imports, wat2wasm, Global, Instance, Module, Store, Value}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -38,7 +38,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -66,12 +66,12 @@ fn main() -> Result<(), Box> { // The Wasm module only imports some globals. We'll have to interact // with them either using the Global API or exported functions. let get_some = instance - .lookup_function("get_some") - .ok_or("could not find `get_some` export")? + .exports + .get_function("get_some")? .native::<(), f32>()?; let get_other = instance - .lookup_function("get_other") - .ok_or("could not find `get_other` export")? + .exports + .get_function("get_other")? .native::<(), f32>()?; let some_result = get_some.call()?; @@ -103,8 +103,8 @@ fn main() -> Result<(), Box> { // Changes made to global through exported functions will // be reflected on the host side. let set_other = instance - .lookup_function("set_other") - .ok_or("could not find `set_other` export")? + .exports + .get_function("set_other")? .native::()?; set_other.call(42.0)?; diff --git a/examples/instance.rs b/examples/instance.rs index e2d8e921f4..1d0b16f945 100644 --- a/examples/instance.rs +++ b/examples/instance.rs @@ -9,13 +9,13 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example instance --release --features "singlepass" +//! cargo run --example instance --release --features "cranelift" //! ``` //! //! Ready? 
use wasmer::{imports, wat2wasm, Instance, Module, Store}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; fn main() -> Result<(), Box> { @@ -39,7 +39,7 @@ fn main() -> Result<(), Box> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -61,8 +61,8 @@ fn main() -> Result<(), Box> { // as the main focus of this example is to show how to create an instance out // of a Wasm module and have basic interactions with it. let add_one = instance - .lookup_function("add_one") - .ok_or("could not find `add_one` export")? + .exports + .get_function("add_one")? .native::()?; println!("Calling `add_one` function..."); diff --git a/examples/memory.rs b/examples/memory.rs index 816b4ba93d..221ec41fb7 100644 --- a/examples/memory.rs +++ b/examples/memory.rs @@ -9,14 +9,14 @@ //! You can run the example directly by executing in Wasmer root: //! //! ```shell -//! cargo run --example memory --release --features "singlepass" +//! cargo run --example memory --release --features "cranelift" //! ``` //! //! Ready? use std::mem; use wasmer::{imports, wat2wasm, Bytes, Instance, Module, NativeFunc, Pages, Store}; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; // this example is a work in progress: @@ -57,7 +57,7 @@ fn main() -> anyhow::Result<()> { // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. @@ -73,22 +73,10 @@ fn main() -> anyhow::Result<()> { // The module exports some utility functions, let's get them. // // These function will be used later in this example. - let mem_size: NativeFunc<(), i32> = instance - .lookup_function("mem_size") - .ok_or(anyhow::anyhow!("could not find `mem_size` export"))? - .native()?; - let get_at: NativeFunc = instance - .lookup_function("get_at") - .ok_or(anyhow::anyhow!("could not find `get_at` export"))? - .native()?; - let set_at: NativeFunc<(i32, i32), ()> = instance - .lookup_function("set_at") - .ok_or(anyhow::anyhow!("could not find `set_at` export"))? - .native()?; - let memory = match instance.lookup("memory") { - Some(wasmer::Export::Memory(m)) => m, - _ => anyhow::bail!("could not find `memory` as an exported memory"), - }; + let mem_size: NativeFunc<(), i32> = instance.exports.get_native_function("mem_size")?; + let get_at: NativeFunc = instance.exports.get_native_function("get_at")?; + let set_at: NativeFunc<(i32, i32), ()> = instance.exports.get_native_function("set_at")?; + let memory = instance.exports.get_memory("memory")?; // We now have an instance ready to be used. // @@ -101,17 +89,15 @@ fn main() -> anyhow::Result<()> { // The size in bytes can be found either by querying its pages or by // querying the memory directly. 
println!("Querying memory size..."); - assert_eq!(memory.from.size(), Pages::from(1)); - assert_eq!(memory.from.size().bytes(), Bytes::from(65536 as usize)); - unsafe { - assert_eq!(memory.from.vmmemory().as_ref().current_length, 65536); - } + assert_eq!(memory.size(), Pages::from(1)); + assert_eq!(memory.size().bytes(), Bytes::from(65536 as usize)); + assert_eq!(memory.data_size(), 65536); // Sometimes, the guest module may also export a function to let you // query the memory. Here we have a `mem_size` function, let's try it: let result = mem_size.call()?; println!("Memory size: {:?}", result); - assert_eq!(Pages::from(result as u32), memory.from.size()); + assert_eq!(Pages::from(result as u32), memory.size()); // Now that we know the size of our memory, it's time to see how wa // can change this. @@ -120,11 +106,9 @@ fn main() -> anyhow::Result<()> { // see how we can do that: println!("Growing memory..."); // Here we are requesting two more pages for our memory. - memory.from.grow(Pages::from(2))?; - assert_eq!(memory.from.size(), Pages::from(3)); - unsafe { - assert_eq!(memory.from.vmmemory().as_ref().current_length, 65536 * 3); - } + memory.grow(2)?; + assert_eq!(memory.size(), Pages::from(3)); + assert_eq!(memory.data_size(), 65536 * 3); // Now that we know how to query and adjust the size of the memory, // let's see how wa can write to it or read from it. diff --git a/examples/metering.rs b/examples/metering.rs new file mode 100644 index 0000000000..8e715c1f7f --- /dev/null +++ b/examples/metering.rs @@ -0,0 +1,173 @@ +//! Wasmer will let you easily run Wasm module in a Rust host. +//! +//! This example illustrates the basics of using Wasmer metering features: +//! +//! 1. How to enable metering in a module +//! 2. How to meter a specific function call +//! 3. How to make execution fails if cost exceeds a given limit +//! +//! You can run the example directly by executing in Wasmer root: +//! +//! ```shell +//! cargo run --example metering --release --features "cranelift" +//! ``` +//! +//! Ready? + +use anyhow::bail; +use std::sync::Arc; +use wasmer::wasmparser::Operator; +use wasmer::CompilerConfig; +use wasmer::{imports, wat2wasm, Instance, Module, Store}; +use wasmer_compiler_cranelift::Cranelift; +use wasmer_engine_universal::Universal; +use wasmer_middlewares::{ + metering::{get_remaining_points, set_remaining_points, MeteringPoints}, + Metering, +}; + +fn main() -> anyhow::Result<()> { + // Let's declare the Wasm module. + // + // We are using the text representation of the module here but you can also load `.wasm` + // files using the `include_bytes!` macro. + let wasm_bytes = wat2wasm( + br#" +(module + (type $add_t (func (param i32) (result i32))) + (func $add_one_f (type $add_t) (param $value i32) (result i32) + local.get $value + i32.const 1 + i32.add) + (export "add_one" (func $add_one_f))) +"#, + )?; + + // Let's define our cost function. + // + // This function will be called for each `Operator` encountered during + // the Wasm module execution. It should return the cost of the operator + // that it received as it first argument. + let cost_function = |operator: &Operator| -> u64 { + match operator { + Operator::LocalGet { .. } | Operator::I32Const { .. } => 1, + Operator::I32Add { .. } => 2, + _ => 0, + } + }; + + // Now let's create our metering middleware. + // + // `Metering` needs to be configured with a limit and a cost function. 
+
+    // Now let's create our metering middleware.
+    //
+    // `Metering` needs to be configured with a limit and a cost function.
+    //
+    // For each `Operator`, the metering middleware will call the cost
+    // function and subtract the cost from the remaining points.
+    let metering = Arc::new(Metering::new(10, cost_function));
+    let mut compiler_config = Cranelift::default();
+    compiler_config.push_middleware(metering);
+
+    // Create a Store.
+    //
+    // We use our previously created compiler configuration
+    // with the Universal engine.
+    let store = Store::new(&Universal::new(compiler_config).engine());
+
+    println!("Compiling module...");
+    // Let's compile the Wasm module.
+    let module = Module::new(&store, wasm_bytes)?;
+
+    // Create an empty import object.
+    let import_object = imports! {};
+
+    println!("Instantiating module...");
+    // Let's instantiate the Wasm module.
+    let instance = Instance::new(&module, &import_object)?;
+
+    // We now have an instance ready to be used.
+    //
+    // Our module exports a single `add_one` function. We want to
+    // measure the cost of executing this function.
+    let add_one = instance
+        .exports
+        .get_function("add_one")?
+        .native::<i32, i32>()?;
+
+    println!("Calling `add_one` function once...");
+    add_one.call(1)?;
+
+    // As you can see here, after the first call we have 6 remaining points.
+    //
+    // This is correct, here are the details of how it has been computed:
+    // * `local.get $value` is an `Operator::LocalGet` which costs 1 point;
+    // * `i32.const` is an `Operator::I32Const` which costs 1 point;
+    // * `i32.add` is an `Operator::I32Add` which costs 2 points.
+    let remaining_points_after_first_call = get_remaining_points(&instance);
+    assert_eq!(
+        remaining_points_after_first_call,
+        MeteringPoints::Remaining(6)
+    );
+
+    println!(
+        "Remaining points after the first call: {:?}",
+        remaining_points_after_first_call
+    );
+
+    println!("Calling `add_one` function twice...");
+    add_one.call(1)?;
+
+    // We spent 4 more points with the second call.
+    // We have 2 remaining points.
+    let remaining_points_after_second_call = get_remaining_points(&instance);
+    assert_eq!(
+        remaining_points_after_second_call,
+        MeteringPoints::Remaining(2)
+    );
+
+    println!(
+        "Remaining points after the second call: {:?}",
+        remaining_points_after_second_call
+    );
+
+    // Because calling our `add_one` function consumes 4 points,
+    // calling it a third time will fail: we have already consumed 8
+    // points and only 2 remain.
+    println!("Calling `add_one` function a third time...");
+    match add_one.call(1) {
+        Ok(result) => {
+            bail!(
+                "Expected failure while calling `add_one`, found: {}",
+                result
+            );
+        }
+        Err(_) => {
+            println!("Calling `add_one` failed.");
+
+            // Because the last call needed more points than remained, we should have an error.
+            let remaining_points = get_remaining_points(&instance);
+
+            match remaining_points {
+                MeteringPoints::Remaining(..) => {
+                    bail!("No metering error: there are remaining points")
+                }
+                MeteringPoints::Exhausted => println!("Not enough points remaining"),
+            }
+        }
+    }
+
+    // Now let's see how we can set a new limit...
+    println!("Set new remaining points to 10");
+    let new_limit = 10;
+    set_remaining_points(&instance, new_limit);
+
+    let remaining_points = get_remaining_points(&instance);
+    assert_eq!(remaining_points, MeteringPoints::Remaining(new_limit));
+
+    println!("Remaining points: {:?}", remaining_points);
+
+    Ok(())
+}
+
+#[test]
+fn test_metering() -> anyhow::Result<()> {
+    main()
+}
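A pattern that falls out of `get_remaining_points`/`set_remaining_points` is per-call refueling: reset the budget before each guest call and measure what the call consumed. A minimal sketch, assuming the `instance` from the example above; the helper name `call_with_budget` is ours, not part of this diff:

```rust
use wasmer::Instance;
use wasmer_middlewares::metering::{
    get_remaining_points, set_remaining_points, MeteringPoints,
};

/// Give the guest a fresh budget, run one call, and report what it used.
/// Returns `Ok(None)` if the budget was exhausted mid-call.
fn call_with_budget(
    instance: &Instance,
    budget: u64,
    call: impl FnOnce() -> anyhow::Result<()>,
) -> anyhow::Result<Option<u64>> {
    // Refuel before the call so earlier calls don't eat into this budget.
    set_remaining_points(instance, budget);
    let outcome = call();
    match get_remaining_points(instance) {
        MeteringPoints::Remaining(left) => {
            outcome?; // propagate non-metering errors
            Ok(Some(budget - left))
        }
        MeteringPoints::Exhausted => Ok(None),
    }
}
```

With the example above, `call_with_budget(&instance, 10, || add_one.call(1).map(drop).map_err(Into::into))` would report 4 points consumed.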
+ println!("Set new remaining points to 10"); + let new_limit = 10; + set_remaining_points(&instance, new_limit); + + let remaining_points = get_remaining_points(&instance); + assert_eq!(remaining_points, MeteringPoints::Remaining(new_limit)); + + println!("Remaining points: {:?}", remaining_points); + + Ok(()) +} + +#[test] +fn test_metering() -> anyhow::Result<()> { + main() +} diff --git a/examples/platform_ios_headless.rs b/examples/platform_ios_headless.rs new file mode 100644 index 0000000000..8a6eb8028c --- /dev/null +++ b/examples/platform_ios_headless.rs @@ -0,0 +1,71 @@ +//! Defining an engine in Wasmer is one of the fundamental steps. +//! +//! This example builds on that of 'engine_headless.rs' but instead of +//! serializing a module and then deserializing it again for your host machines target, +//! We instead create an engine for our target architecture (In this case an ARM64 iOS device), +//! serialize a simple module to a .dylib file that can be copied to an iOS project and +//! deserialized/ran using the 'Headless C-API'. +//! +//! ```shell +//! cargo run --example platform-headless-ios --release --features "cranelift" +//! ``` +//! +//! Ready? + +use std::path::Path; +use std::str::FromStr; +use wasmer::{wat2wasm, Module, RuntimeError, Store}; +use wasmer_compiler::{CpuFeature, Target, Triple}; +use wasmer_compiler_cranelift::Cranelift; +use wasmer_engine_dylib::Dylib; + +fn main() -> Result<(), Box> { + // Let's declare the Wasm module with the text representation. + let wasm_bytes = wat2wasm( + r#" +(module +(type $sum_t (func (param i32 i32) (result i32))) +(func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32) +local.get $x +local.get $y +i32.add) +(export "sum" (func $sum_f))) +"# + .as_bytes(), + )?; + + // Create a compiler for iOS + let compiler_config = Cranelift::default(); + // Change it to `x86_64-apple-ios` if you want to target the iOS simulator + let triple = Triple::from_str("aarch64-apple-ios") + .map_err(|error| RuntimeError::new(error.to_string()))?; + + // Let's build the target. + let mut cpu_feature = CpuFeature::set(); + cpu_feature.insert(CpuFeature::from_str("sse2")?); + let target = Target::new(triple, cpu_feature); + println!("Chosen target: {:?}", target); + + println!("Creating Dylib engine..."); + let engine = Dylib::new(compiler_config).target(target).engine(); + + // Create a store, that holds the engine. + let store = Store::new(&engine); + + println!("Compiling module..."); + // Let's compile the Wasm module. + let module = Module::new(&store, wasm_bytes)?; + // Here we go. Let's serialize the compiled Wasm module in a + // file. + println!("Serializing module..."); + let dylib_file = Path::new("./sum.dylib"); + module.serialize_to_file(dylib_file)?; + + Ok(()) +} + +#[test] +#[cfg(target_os = "macos")] +fn test_engine_headless_ios() -> Result<(), Box> { + main() +} diff --git a/examples/table.rs b/examples/table.rs index c63f550bf2..2ae33badf7 100644 --- a/examples/table.rs +++ b/examples/table.rs @@ -1,9 +1,8 @@ use wasmer::{ imports, wat2wasm, Function, Instance, Module, NativeFunc, Store, TableType, Type, Value, }; -use wasmer_compiler_singlepass::Singlepass; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; -use wasmer_vm::TableElement; /// A function we'll call through a table. 
fn host_callback(arg1: i32, arg2: i32) -> i32 { @@ -30,14 +29,14 @@ fn main() -> anyhow::Result<()> { (func $call_callback (type $call_callback_t) (param $idx i32) (param $arg1 i32) (param $arg2 i32) (result i32) - (call_indirect (type $callback_t) + (call_indirect (type $callback_t) (local.get $arg1) (local.get $arg2) (local.get $idx))) ;; A default function that we'll pad the table with. ;; This function doubles both its inputs and then sums them. (func $default_fn (type $callback_t) (param $a i32) (param $b i32) (result i32) - (i32.add + (i32.add (i32.mul (local.get $a) (i32.const 2)) (i32.mul (local.get $b) (i32.const 2)))) @@ -52,7 +51,7 @@ fn main() -> anyhow::Result<()> { )?; // We set up our store with an engine and a compiler. - let store = Store::new(&Universal::new(Singlepass::default()).engine()); + let store = Store::new(&Universal::new(Cranelift::default()).engine()); // Then compile our Wasm. let module = Module::new(&store, wasm_bytes)?; let import_object = imports! {}; @@ -62,10 +61,8 @@ fn main() -> anyhow::Result<()> { // We get our function that calls (i32, i32) -> i32 functions via table. // The first argument is the table index and the next 2 are the 2 arguments // to be passed to the function found in the table. - let call_via_table: NativeFunc<(i32, i32, i32), i32> = instance - .lookup_function("call_callback") - .ok_or(anyhow::anyhow!("could not find `call_callback` export"))? - .native()?; + let call_via_table: NativeFunc<(i32, i32, i32), i32> = + instance.exports.get_native_function("call_callback")?; // And then call it with table index 1 and arguments 2 and 7. let result = call_via_table.call(1, 2, 7)?; @@ -74,26 +71,17 @@ fn main() -> anyhow::Result<()> { assert_eq!(result, 18); // We then get the table from the instance. - let export = instance - .lookup("__indirect_function_table") - .ok_or(anyhow::anyhow!( - "could not find `__indirect_function_table` export" - ))?; - let guest_table = if let wasmer::Export::Table(guest_table) = export { - // And demonstrate that it has the properties that we set in the Wasm. - assert_eq!(guest_table.from.size(), 3); - assert_eq!( - guest_table.ty(), - &TableType { - ty: Type::FuncRef, - minimum: 3, - maximum: Some(6) - } - ); - guest_table - } else { - anyhow::bail!("`__indirect_function_table` is not a table") - }; + let guest_table = instance.exports.get_table("__indirect_function_table")?; + // And demonstrate that it has the properties that we set in the Wasm. + assert_eq!(guest_table.size(), 3); + assert_eq!( + guest_table.ty(), + &TableType { + ty: Type::FuncRef, + minimum: 3, + maximum: Some(6) + } + ); // == Setting elements in a table == @@ -101,10 +89,7 @@ fn main() -> anyhow::Result<()> { let func = Function::new_native(&store, host_callback); // And set table index 1 of that table to the host_callback `Function`. - guest_table - .from - .set(1, func.into()) - .map_err(|t| anyhow::anyhow!(format!("trap: {:?}", t)))?; + guest_table.set(1, func.into())?; // We then repeat the call from before but this time it will find the host function // that we put at table index 1. @@ -119,13 +104,10 @@ fn main() -> anyhow::Result<()> { // And grow the table by 3 elements, filling in our host_callback in all the // new elements of the table. 
- let previous_size = guest_table - .from - .grow(3, func.into()) - .ok_or(anyhow::anyhow!("could not grow the table"))?; + let previous_size = guest_table.grow(3, func.into())?; assert_eq!(previous_size, 3); - assert_eq!(guest_table.from.size(), 6); + assert_eq!(guest_table.size(), 6); assert_eq!( guest_table.ty(), &TableType { @@ -136,13 +118,9 @@ fn main() -> anyhow::Result<()> { ); // Now demonstrate that the function we grew the table with is actually in the table. for table_index in 3..6 { - if let Some(TableElement::FuncRef(f)) = guest_table.from.get(table_index as _) { - unsafe { - let result = Function::from_vm_funcref(&store, f) - .expect("funcref should not be null") - .call(&[Value::I32(1), Value::I32(9)])?; - assert_eq!(result[0], Value::I32(10)); - } + if let Value::FuncRef(Some(f)) = guest_table.get(table_index as _).unwrap() { + let result = f.call(&[Value::I32(1), Value::I32(9)])?; + assert_eq!(result[0], Value::I32(10)); } else { panic!("expected to find funcref in table!"); } @@ -154,10 +132,7 @@ fn main() -> anyhow::Result<()> { // Now overwrite index 0 with our host_callback. let func = Function::new_native(&store, host_callback); - guest_table - .from - .set(0, func.into()) - .map_err(|e| anyhow::anyhow!(format!("trap: {:?}", e)))?; + guest_table.set(0, func.into())?; // And verify that it does what we expect. let result = call_via_table.call(0, 2, 7)?; assert_eq!(result, 9); @@ -165,13 +140,9 @@ fn main() -> anyhow::Result<()> { // Now demonstrate that the host and guest see the same table and that both // get the same result. for table_index in 3..6 { - if let Some(TableElement::FuncRef(f)) = guest_table.from.get(table_index as _) { - unsafe { - let result = Function::from_vm_funcref(&store, f) - .expect("funcref should not be null") - .call(&[Value::I32(1), Value::I32(9)])?; - assert_eq!(result[0], Value::I32(10)); - } + if let Value::FuncRef(Some(f)) = guest_table.get(table_index as _).unwrap() { + let result = f.call(&[Value::I32(1), Value::I32(9)])?; + assert_eq!(result[0], Value::I32(10)); } else { panic!("expected to find funcref in table!"); } diff --git a/examples/tracy_exec.rs b/examples/tracy_exec.rs deleted file mode 100644 index 81738575a1..0000000000 --- a/examples/tracy_exec.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! This is a simple example introducing the core concepts of the Wasmer API. -//! -//! You can run the example directly by executing the following in the Wasmer root: -//! -//! ```shell -//! cargo run --example hello-world --release --features "cranelift" -//! 
```
-
-use anyhow::Context;
-use tracing_subscriber::layer::SubscriberExt;
-use wasmer::{imports, wat2wasm, Function, Instance, Module, Singlepass, Store};
-use wasmer_engine_universal::Universal;
-
-// Note: the below makes the code under profiling _much much_ slower (several orders of magnitude)
-/*
-#[global_allocator]
-static GLOBAL: tracy_client::ProfiledAllocator<std::alloc::System> =
-    tracy_client::ProfiledAllocator::new(std::alloc::System, 100);
-*/
-
-fn main() -> anyhow::Result<()> {
-    tracing::subscriber::set_global_default(
-        tracing_subscriber::registry()
-            .with(tracing_subscriber::fmt::Layer::new())
-            .with(tracing_tracy::TracyLayer::new()),
-    )
-    .expect("Failed setting tracing subscriber");
-
-    // Prepare rayon so that we don't see a huge 3-4ms for the first rayon usage
-    tracing::info_span!("init_rayon").in_scope(|| {
-        rayon::ThreadPoolBuilder::new().build_global().unwrap();
-    });
-
-    // Load the wasm
-    let args = std::env::args().collect::<Vec<String>>();
-    let input = std::fs::read(args.get(1).context("Usage: tracy_exec ")?)
-        .context("Failed reading input file")?;
-    let wasm_bytes = wat2wasm(&input).context("Failed parsing the input as wasm")?;
-
-    // Configure a Store
-    let store = Store::new(&Universal::new(Singlepass::new()).engine());
-
-    // Compile a Module
-    let module = Module::new(&store, wasm_bytes)?;
-
-    // Link in an Instance with minimal functions
-    fn gas(_foo: i32) {}
-    let import_object = imports! {
-        "env" => {
-            "gas" => Function::new_native(&store, gas),
-        }
-    };
-    let _instance = Instance::new(&module, &import_object)?;
-
-    // Then everything is ready and compiled, so there is nothing more to trace.
-
-    // Sleep a bit for Tracy to have enough time to gather everything
-    tracy_client::non_continuous_frame!("tracy cleanup sleep");
-    std::thread::sleep(std::time::Duration::from_millis(100));
-
-    Ok(())
-}
diff --git a/examples/tunables_limit_memory.rs b/examples/tunables_limit_memory.rs
new file mode 100644
index 0000000000..28d31450b5
--- /dev/null
+++ b/examples/tunables_limit_memory.rs
@@ -0,0 +1,183 @@
+use std::ptr::NonNull;
+use std::sync::Arc;
+
+use wasmer::{
+    imports,
+    vm::{self, MemoryError, MemoryStyle, TableStyle, VMMemoryDefinition, VMTableDefinition},
+    wat2wasm, BaseTunables, Instance, Memory, MemoryType, Module, Pages, Store, TableType, Target,
+    Tunables,
+};
+use wasmer_compiler_cranelift::Cranelift;
+use wasmer_engine_universal::Universal;
+
+/// A custom tunables that allows you to set a memory limit.
+///
+/// After adjusting the memory limits, it delegates all other logic
+/// to the base tunables.
+pub struct LimitingTunables<T: Tunables> {
+    /// The maximum a linear memory is allowed to be (in Wasm pages, 64 KiB each).
+    /// Since Wasmer ensures there is only none or one memory, this is practically
+    /// an upper limit for the guest memory.
+    limit: Pages,
+    /// The base implementation we delegate all the logic to
+    base: T,
+}
+
+impl<T: Tunables> LimitingTunables<T> {
+    pub fn new(base: T, limit: Pages) -> Self {
+        Self { limit, base }
+    }
+
+    /// Takes an input memory type as requested by the guest and sets
+    /// a maximum if missing. The resulting memory type is final if
+    /// valid. However, this can produce invalid types, such that
+    /// validate_memory must be called before creating the memory.
+    fn adjust_memory(&self, requested: &MemoryType) -> MemoryType {
+        let mut adjusted = requested.clone();
+        if requested.maximum.is_none() {
+            adjusted.maximum = Some(self.limit);
+        }
+        adjusted
+    }
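+
+    // For example, with `limit = Pages(24)`, a guest request of
+    // "min: 7, max: unset" is adjusted to "max: 24", while an explicit
+    // maximum is left as-is and checked by `validate_memory` below.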
+
+    /// Ensures that a given memory type does not exceed the memory limit.
+    /// Call this after adjusting the memory.
+    fn validate_memory(&self, ty: &MemoryType) -> Result<(), MemoryError> {
+        if ty.minimum > self.limit {
+            return Err(MemoryError::Generic(
+                "Minimum exceeds the allowed memory limit".to_string(),
+            ));
+        }
+
+        if let Some(max) = ty.maximum {
+            if max > self.limit {
+                return Err(MemoryError::Generic(
+                    "Maximum exceeds the allowed memory limit".to_string(),
+                ));
+            }
+        } else {
+            return Err(MemoryError::Generic("Maximum unset".to_string()));
+        }
+
+        Ok(())
+    }
+}
+
+impl<T: Tunables> Tunables for LimitingTunables<T> {
+    /// Construct a `MemoryStyle` for the provided `MemoryType`
+    ///
+    /// Delegated to base.
+    fn memory_style(&self, memory: &MemoryType) -> MemoryStyle {
+        let adjusted = self.adjust_memory(memory);
+        self.base.memory_style(&adjusted)
+    }
+
+    /// Construct a `TableStyle` for the provided `TableType`
+    ///
+    /// Delegated to base.
+    fn table_style(&self, table: &TableType) -> TableStyle {
+        self.base.table_style(table)
+    }
+
+    /// Create a memory owned by the host given a [`MemoryType`] and a [`MemoryStyle`].
+    ///
+    /// The requested memory type is validated, adjusted to the limit and then passed to base.
+    fn create_host_memory(
+        &self,
+        ty: &MemoryType,
+        style: &MemoryStyle,
+    ) -> Result<Arc<dyn vm::Memory>, MemoryError> {
+        let adjusted = self.adjust_memory(ty);
+        self.validate_memory(&adjusted)?;
+        self.base.create_host_memory(&adjusted, style)
+    }
+
+    /// Create a memory owned by the VM given a [`MemoryType`] and a [`MemoryStyle`].
+    ///
+    /// Delegated to base.
+    unsafe fn create_vm_memory(
+        &self,
+        ty: &MemoryType,
+        style: &MemoryStyle,
+        vm_definition_location: NonNull<VMMemoryDefinition>,
+    ) -> Result<Arc<dyn vm::Memory>, MemoryError> {
+        let adjusted = self.adjust_memory(ty);
+        self.validate_memory(&adjusted)?;
+        self.base
+            .create_vm_memory(&adjusted, style, vm_definition_location)
+    }
+
+    /// Create a table owned by the host given a [`TableType`] and a [`TableStyle`].
+    ///
+    /// Delegated to base.
+    fn create_host_table(
+        &self,
+        ty: &TableType,
+        style: &TableStyle,
+    ) -> Result<Arc<dyn vm::Table>, String> {
+        self.base.create_host_table(ty, style)
+    }
+
+    /// Create a table owned by the VM given a [`TableType`] and a [`TableStyle`].
+    ///
+    /// Delegated to base.
+    unsafe fn create_vm_table(
+        &self,
+        ty: &TableType,
+        style: &TableStyle,
+        vm_definition_location: NonNull<VMTableDefinition>,
+    ) -> Result<Arc<dyn vm::Table>, String> {
+        self.base.create_vm_table(ty, style, vm_definition_location)
+    }
+}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // A Wasm module with one exported memory (min: 7 pages, max: unset)
+    let wat = br#"(module (memory 7) (export "memory" (memory 0)))"#;
+
+    // Alternatively: A Wasm module with one exported memory (min: 7 pages, max: 80 pages)
+    // let wat = br#"(module (memory 7 80) (export "memory" (memory 0)))"#;
+
+    let wasm_bytes = wat2wasm(wat)?;
+
+    // Any compiler and any engine do the job here
+    let compiler = Cranelift::default();
+    let engine = Universal::new(compiler).engine();
+
+    // Here is where the fun begins
+
+    let base = BaseTunables::for_target(&Target::default());
+    let tunables = LimitingTunables::new(base, Pages(24));
+
+    // Create a store that holds the engine and our custom tunables
+    let store = Store::new_with_tunables(&engine, tunables);
+
+    println!("Compiling module...");
+    let module = Module::new(&store, wasm_bytes)?;
+
+    println!("Instantiating module...");
+    let import_object = imports!
{}; + + // Now at this point, our custom tunables are used + let instance = Instance::new(&module, &import_object)?; + + // Check what happened + let mut memories: Vec = instance + .exports + .iter() + .memories() + .map(|pair| pair.1.clone()) + .collect(); + assert_eq!(memories.len(), 1); + + let first_memory = memories.pop().unwrap(); + println!("Memory of this instance: {:?}", first_memory); + assert_eq!(first_memory.ty().maximum.unwrap(), Pages(24)); + + Ok(()) +} + +#[test] +fn test_tunables_limit_memory() -> Result<(), Box> { + main() +} diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 5cf6247f66..c5fd28a5bd 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -13,11 +13,15 @@ anyhow = "1" wasm-smith = "0.4.4" libfuzzer-sys = "0.4.0" wasmer = { path = "../lib/api", package = "wasmer-near" } +wasmer-compiler-cranelift = { path = "../lib/compiler-cranelift", optional = true } +wasmer-compiler-llvm = { path = "../lib/compiler-llvm", optional = true } wasmer-compiler-singlepass = { path = "../lib/compiler-singlepass", package = "wasmer-compiler-singlepass-near", optional = true } wasmer-engine-universal = { path = "../lib/engine-universal", package = "wasmer-engine-universal-near", optional = true } wasmprinter = "0.2" [features] +cranelift = [ "wasmer-compiler-cranelift" ] +llvm = [ "wasmer-compiler-llvm" ] singlepass = [ "wasmer-compiler-singlepass" ] universal = [ "wasmer-engine-universal" ] @@ -26,7 +30,27 @@ name = "equivalence_universal" path = "fuzz_targets/equivalence_universal.rs" required-features = ["universal"] +[[bin]] +name = "universal_cranelift" +path = "fuzz_targets/universal_cranelift.rs" +required-features = ["universal", "cranelift"] + +[[bin]] +name = "universal_llvm" +path = "fuzz_targets/universal_llvm.rs" +required-features = ["universal", "llvm"] + [[bin]] name = "universal_singlepass" path = "fuzz_targets/universal_singlepass.rs" required-features = ["universal", "singlepass"] + +[[bin]] +name = "metering" +path = "fuzz_targets/metering.rs" +required-features = ["universal", "cranelift"] + +[[bin]] +name = "deterministic" +path = "fuzz_targets/deterministic.rs" +required-features = ["universal", "cranelift", "llvm", "singlepass"] diff --git a/fuzz/fuzz_targets/deterministic.rs b/fuzz/fuzz_targets/deterministic.rs new file mode 100644 index 0000000000..b613d5449d --- /dev/null +++ b/fuzz/fuzz_targets/deterministic.rs @@ -0,0 +1,81 @@ +#![no_main] + +use libfuzzer_sys::{arbitrary, arbitrary::Arbitrary, fuzz_target}; +use wasm_smith::{Config, ConfiguredModule}; +use wasmer::{CompilerConfig, Engine, Module, Store}; +use wasmer_compiler_cranelift::Cranelift; +use wasmer_compiler_llvm::LLVM; +use wasmer_compiler_singlepass::Singlepass; +use wasmer_engine_dylib::Dylib; +use wasmer_engine_universal::Universal; + +#[derive(Arbitrary, Debug, Default, Copy, Clone)] +struct NoImportsConfig; +impl Config for NoImportsConfig { + fn max_imports(&self) -> usize { + 0 + } + fn max_memory_pages(&self) -> u32 { + // https://github.com/wasmerio/wasmer/issues/2187 + 65535 + } + fn allow_start_export(&self) -> bool { + false + } +} + +fn compile_and_compare(name: &str, engine: impl Engine, wasm: &[u8]) { + let store = Store::new(&engine); + + // compile for first time + let module = Module::new(&store, wasm).unwrap(); + let first = module.serialize().unwrap(); + + // compile for second time + let module = Module::new(&store, wasm).unwrap(); + let second = module.serialize().unwrap(); + + if first != second { + panic!("non-deterministic compilation from {}", name); + } +} + 
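+// Each backend below is compiled twice on the same input; byte-for-byte
+// equality of the serialized artifacts is what "deterministic" means here.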
+fuzz_target!(|module: ConfiguredModule| { + let wasm_bytes = module.to_bytes(); + + let mut compiler = Cranelift::default(); + compiler.canonicalize_nans(true); + compiler.enable_verifier(); + compile_and_compare( + "universal-cranelift", + Universal::new(compiler.clone()).engine(), + &wasm_bytes, + ); + //compile_and_compare( + // "dylib-cranelift", + // Dylib::new(compiler).engine(), + // &wasm_bytes, + //); + + let mut compiler = LLVM::default(); + compiler.canonicalize_nans(true); + compiler.enable_verifier(); + compile_and_compare( + "universal-llvm", + Universal::new(compiler.clone()).engine(), + &wasm_bytes, + ); + //compile_and_compare("dylib-llvm", Dylib::new(compiler).engine(), &wasm_bytes); + + let compiler = Singlepass::default(); + compile_and_compare( + "universal-singlepass", + Universal::new(compiler.clone()).engine(), + &wasm_bytes, + ); + //compile_and_compare( + // "dylib-singlepass", + // Dylib::new(compiler).engine(), + // &wasm_bytes, + //); +}); diff --git a/fuzz/fuzz_targets/equivalence_universal.rs b/fuzz/fuzz_targets/equivalence_universal.rs index 15364cd8cc..09c4a0c718 100644 --- a/fuzz/fuzz_targets/equivalence_universal.rs +++ b/fuzz/fuzz_targets/equivalence_universal.rs @@ -5,6 +5,10 @@ use anyhow::Result; use libfuzzer_sys::{arbitrary, arbitrary::Arbitrary, fuzz_target}; use wasm_smith::{Config, ConfiguredModule}; use wasmer::{imports, CompilerConfig, Instance, Module, Store, Val}; +#[cfg(feature = "cranelift")] +use wasmer_compiler_cranelift::Cranelift; +#[cfg(feature = "llvm")] +use wasmer_compiler_llvm::LLVM; #[cfg(feature = "singlepass")] use wasmer_compiler_singlepass::Singlepass; use wasmer_engine_universal::Universal; @@ -60,6 +64,28 @@ fn maybe_instantiate_singlepass(wasm_bytes: &[u8]) -> Result> { Ok(Some(instance)) } +#[cfg(feature = "cranelift")] +fn maybe_instantiate_cranelift(wasm_bytes: &[u8]) -> Result> { + let mut compiler = Cranelift::default(); + compiler.canonicalize_nans(true); + compiler.enable_verifier(); + let store = Store::new(&Universal::new(compiler).engine()); + let module = Module::new(&store, &wasm_bytes)?; + let instance = Instance::new(&module, &imports! {})?; + Ok(Some(instance)) +} + +#[cfg(feature = "llvm")] +fn maybe_instantiate_llvm(wasm_bytes: &[u8]) -> Result> { + let mut compiler = LLVM::default(); + compiler.canonicalize_nans(true); + compiler.enable_verifier(); + let store = Store::new(&Universal::new(compiler).engine()); + let module = Module::new(&store, &wasm_bytes)?; + let instance = Instance::new(&module, &imports! 
{})?; + Ok(Some(instance)) +} + #[derive(Debug)] enum FunctionResult { Error(String), @@ -157,4 +183,25 @@ fuzz_target!(|module: WasmSmithModule| { let singlepass = maybe_instantiate_singlepass(&wasm_bytes) .transpose() .map(evaluate_instance); + #[cfg(feature = "cranelift")] + let cranelift = maybe_instantiate_cranelift(&wasm_bytes) + .transpose() + .map(evaluate_instance); + #[cfg(feature = "llvm")] + let llvm = maybe_instantiate_llvm(&wasm_bytes) + .transpose() + .map(evaluate_instance); + + #[cfg(all(feature = "singlepass", feature = "cranelift"))] + if singlepass.is_some() && cranelift.is_some() { + assert_eq!(singlepass.as_ref().unwrap(), cranelift.as_ref().unwrap()); + } + #[cfg(all(feature = "singlepass", feature = "llvm"))] + if singlepass.is_some() && llvm.is_some() { + assert_eq!(singlepass.as_ref().unwrap(), llvm.as_ref().unwrap()); + } + #[cfg(all(feature = "cranelift", feature = "llvm"))] + if cranelift.is_some() && llvm.is_some() { + assert_eq!(cranelift.as_ref().unwrap(), llvm.as_ref().unwrap()); + } }); diff --git a/fuzz/fuzz_targets/metering.rs b/fuzz/fuzz_targets/metering.rs index bbd858147a..69d541967c 100644 --- a/fuzz/fuzz_targets/metering.rs +++ b/fuzz/fuzz_targets/metering.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use wasm_smith::{Config, ConfiguredModule}; use wasmer::wasmparser::Operator; use wasmer::{imports, CompilerConfig, Instance, Module, Store}; +use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_universal::Universal; use wasmer_middlewares::Metering; @@ -50,7 +51,7 @@ fuzz_target!(|module: WasmSmithModule| { return; } - let mut compiler = Singlepass::default(); + let mut compiler = Cranelift::default(); compiler.canonicalize_nans(true); compiler.enable_verifier(); let metering = Arc::new(Metering::new(10, cost)); diff --git a/fuzz/fuzz_targets/universal_cranelift.rs b/fuzz/fuzz_targets/universal_cranelift.rs new file mode 100644 index 0000000000..05133e4428 --- /dev/null +++ b/fuzz/fuzz_targets/universal_cranelift.rs @@ -0,0 +1,59 @@ +#![no_main] + +use libfuzzer_sys::{arbitrary, arbitrary::Arbitrary, fuzz_target}; +use wasm_smith::{Config, ConfiguredModule}; +use wasmer::{imports, CompilerConfig, Instance, Module, Store}; +use wasmer_compiler_cranelift::Cranelift; +use wasmer_engine_universal::Universal; + +#[derive(Arbitrary, Debug, Default, Copy, Clone)] +struct NoImportsConfig; +impl Config for NoImportsConfig { + fn max_imports(&self) -> usize { + 0 + } + fn max_memory_pages(&self) -> u32 { + // https://github.com/wasmerio/wasmer/issues/2187 + 65535 + } + fn allow_start_export(&self) -> bool { + false + } +} +#[derive(Arbitrary)] +struct WasmSmithModule(ConfiguredModule); +impl std::fmt::Debug for WasmSmithModule { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&wasmprinter::print_bytes(self.0.to_bytes()).unwrap()) + } +} + +fuzz_target!(|module: WasmSmithModule| { + let wasm_bytes = module.0.to_bytes(); + + if let Ok(path) = std::env::var("DUMP_TESTCASE") { + use std::fs::File; + use std::io::Write; + let mut file = File::create(path).unwrap(); + file.write_all(&wasm_bytes).unwrap(); + return; + } + + let mut compiler = Cranelift::default(); + compiler.canonicalize_nans(true); + compiler.enable_verifier(); + let store = Store::new(&Universal::new(compiler).engine()); + let module = Module::new(&store, &wasm_bytes).unwrap(); + match Instance::new(&module, &imports! 
{}) { + Ok(_) => {} + Err(e) => { + let error_message = format!("{}", e); + if error_message.starts_with("RuntimeError: ") + && error_message.contains("out of bounds") + { + return; + } + panic!("{}", e); + } + } +}); diff --git a/fuzz/fuzz_targets/universal_llvm.rs b/fuzz/fuzz_targets/universal_llvm.rs new file mode 100644 index 0000000000..0d0044e026 --- /dev/null +++ b/fuzz/fuzz_targets/universal_llvm.rs @@ -0,0 +1,59 @@ +#![no_main] + +use libfuzzer_sys::{arbitrary, arbitrary::Arbitrary, fuzz_target}; +use wasm_smith::{Config, ConfiguredModule}; +use wasmer::{imports, CompilerConfig, Instance, Module, Store}; +use wasmer_compiler_llvm::LLVM; +use wasmer_engine_universal::Universal; + +#[derive(Arbitrary, Debug, Default, Copy, Clone)] +struct NoImportsConfig; +impl Config for NoImportsConfig { + fn max_imports(&self) -> usize { + 0 + } + fn max_memory_pages(&self) -> u32 { + // https://github.com/wasmerio/wasmer/issues/2187 + 65535 + } + fn allow_start_export(&self) -> bool { + false + } +} +#[derive(Arbitrary)] +struct WasmSmithModule(ConfiguredModule); +impl std::fmt::Debug for WasmSmithModule { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&wasmprinter::print_bytes(self.0.to_bytes()).unwrap()) + } +} + +fuzz_target!(|module: WasmSmithModule| { + let wasm_bytes = module.0.to_bytes(); + + if let Ok(path) = std::env::var("DUMP_TESTCASE") { + use std::fs::File; + use std::io::Write; + let mut file = File::create(path).unwrap(); + file.write_all(&wasm_bytes).unwrap(); + return; + } + + let mut compiler = LLVM::default(); + compiler.canonicalize_nans(true); + compiler.enable_verifier(); + let store = Store::new(&Universal::new(compiler).engine()); + let module = Module::new(&store, &wasm_bytes).unwrap(); + match Instance::new(&module, &imports! {}) { + Ok(_) => {} + Err(e) => { + let error_message = format!("{}", e); + if error_message.starts_with("RuntimeError: ") + && error_message.contains("out of bounds") + { + return; + } + panic!("{}", e); + } + } +}); diff --git a/fuzzbuzz.yaml b/fuzzbuzz.yaml index 197c3e4133..cbf6d4aef5 100644 --- a/fuzzbuzz.yaml +++ b/fuzzbuzz.yaml @@ -2,6 +2,7 @@ wasmer: language: rust features: - universal + - cranelift - singlepass deps: - run: apt update diff --git a/lib/README.md b/lib/README.md index aaf820c812..1a946a7feb 100644 --- a/lib/README.md +++ b/lib/README.md @@ -9,6 +9,9 @@ composed of a set of crates. We can group them as follows: * `cli` — The Wasmer CLI itself, * `compiler` — The base for the compiler implementations, it defines the framework for the compilers and provides everything they need: + * `compiler-cranelift` — A WebAssembly compiler based on the Cranelift compiler infrastructure, + * `compiler-llvm` — A WebAssembly compiler based on the LLVM compiler infrastructure; recommended + for runtime speed performance, * `compiler-singlepass` — A WebAssembly compiler based on our own compilation infrastructure; recommended for compilation-time speed performance. * `derive` — A set of procedural macros used inside Wasmer, diff --git a/lib/api/Cargo.toml b/lib/api/Cargo.toml index 97f3f19a6c..9bc835c9a8 100644 --- a/lib/api/Cargo.toml +++ b/lib/api/Cargo.toml @@ -16,14 +16,10 @@ name = "wasmer" # Shared dependencies. [dependencies] # - Mandatory shared dependencies. -cfg-if = "1.0" -finite-wasm = "0.3.0" indexmap = { version = "1.6" } -more-asserts = "0.2" -prefix-sum-vec = "0.1.2" +cfg-if = "1.0" thiserror = "1.0" -tracing = "0.1" - +more-asserts = "0.2" # - Optional shared dependencies. 
wat = { version = "1.0", optional = true } @@ -32,11 +28,14 @@ wat = { version = "1.0", optional = true } # - Mandatory dependencies for `sys`. wasmer-vm = { path = "../vm", version = "=2.4.0", package = "wasmer-vm-near" } wasmer-compiler = { path = "../compiler", version = "=2.4.0", package = "wasmer-compiler-near" } +wasmer-derive = { path = "../derive", version = "=2.4.0", package = "wasmer-derive-near" } wasmer-engine = { path = "../engine", version = "=2.4.0", package = "wasmer-engine-near" } wasmer-types = { path = "../types", version = "=2.4.0", package = "wasmer-types-near" } target-lexicon = { version = "0.12.2", default-features = false } # - Optional dependencies for `sys`. wasmer-compiler-singlepass = { path = "../compiler-singlepass", package = "wasmer-compiler-singlepass-near", version = "=2.4.0", optional = true} +wasmer-compiler-cranelift = { path = "../compiler-cranelift", version = "2.1.0", optional = true } +wasmer-compiler-llvm = { path = "../compiler-llvm", version = "2.1.0", optional = true } wasmer-engine-universal = { path = "../engine-universal", package = "wasmer-engine-universal-near", version = "=2.4.0", optional = true } # - Mandatory dependencies for `sys` on Windows. [target.'cfg(all(not(target_arch = "wasm32"), target_os = "windows"))'.dependencies] @@ -68,11 +67,27 @@ compiler = [ "compiler", "wasmer-compiler-singlepass", ] + cranelift = [ + "compiler", + "wasmer-compiler-cranelift", + ] + llvm = [ + "compiler", + "wasmer-compiler-llvm", + ] default-compiler = [] default-singlepass = [ "default-compiler", "singlepass", ] + default-cranelift = [ + "default-compiler", + "cranelift", + ] + default-llvm = [ + "default-compiler", + "llvm", + ] # - Engines. engine = ["sys"] universal = [ @@ -84,6 +99,11 @@ default-engine = [] "default-engine", "universal", ] +# - Experimental / in-development features +experimental-reference-types-extern-ref = [ + "sys", + "wasmer-types/experimental-reference-types-extern-ref", +] [package.metadata.docs.rs] -features = ["compiler", "core", "default-compiler", "default-engine", "engine", "jit", "native", "singlepass", "sys", "sys-default", "universal"] +features = ["compiler", "core", "cranelift", "default-compiler", "default-engine", "engine", "jit", "llvm", "native", "singlepass", "sys", "sys-default", "universal"] diff --git a/lib/api/README.md b/lib/api/README.md index 8005467eb9..c0145bc881 100644 --- a/lib/api/README.md +++ b/lib/api/README.md @@ -60,13 +60,17 @@ Wasmer is not only fast, but also designed to be *highly customizable*: transform WebAssembly into executable code: * [`wasmer-compiler-singlepass`] provides a fast compilation-time but an unoptimized runtime speed, - + * [`wasmer-compiler-cranelift`] provides the right balance between + compilation-time and runtime performance, useful for development, + * [`wasmer-compiler-llvm`] provides a deeply optimized executable + code with the fastest runtime speed, ideal for production. + * **Headless mode** — Once a WebAssembly module has been compiled, it is possible to serialize it in a file for example, and later execute it with Wasmer with headless mode turned on. Headless Wasmer has no compiler, which makes it more portable and faster to load. It's ideal for constrained environments. - + * **Cross-compilation** — Most compilers support cross-compilation.
This means it is possible to pre-compile a WebAssembly module targeting a different architecture or platform and serialize it, to then run it @@ -98,5 +102,7 @@ Made with ❤️ by the Wasmer team, for the community [`wasmer-engine-universal`]: https://github.com/wasmerio/wasmer/tree/master/lib/engine-universal [`wasmer-engine-dylib`]: https://github.com/wasmerio/wasmer/tree/master/lib/engine-dylib -[`wasmer-engine-staticlib`]: https://github.com/wasmerio/wasmer/tree/master/lib/engine-staticlib +[`wasmer-engine-staticlib`]: https://github.com/wasmerio/wasmer/tree/master/lib/engine-staticlib [`wasmer-compiler-singlepass`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-singlepass +[`wasmer-compiler-cranelift`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-cranelift +[`wasmer-compiler-llvm`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-llvm diff --git a/lib/api/src/lib.rs b/lib/api/src/lib.rs index e40a2b7bfb..ff4809b8af 100644 --- a/lib/api/src/lib.rs +++ b/lib/api/src/lib.rs @@ -6,7 +6,7 @@ missing_docs, trivial_numeric_casts, unused_extern_crates, - rustdoc::broken_intra_doc_links + broken_intra_doc_links )] #![warn(unused_import_braces)] #![cfg_attr( @@ -86,6 +86,10 @@ //! transform WebAssembly into executable code: //! * [`wasmer-compiler-singlepass`] provides a fast compilation-time //! but an unoptimized runtime speed, +//! * [`wasmer-compiler-cranelift`] provides the right balance between +//! compilation-time and runtime performance, useful for development, +//! * [`wasmer-compiler-llvm`] provides a deeply optimized executable +//! code with the fastest runtime speed, ideal for production. //! //! * **Headless mode** — Once a WebAssembly module has been compiled, it //! is possible to serialize it in a file for example, and later execute //! @@ -287,6 +291,14 @@ //! set defaults. //! //! The features that enable new functionality are: +//! - `cranelift` +#![cfg_attr(feature = "cranelift", doc = "(enabled),")] +#![cfg_attr(not(feature = "cranelift"), doc = "(disabled),")] +//! enables Wasmer's [Cranelift compiler][wasmer-compiler-cranelift], +//! - `llvm` +#![cfg_attr(feature = "llvm", doc = "(enabled),")] +#![cfg_attr(not(feature = "llvm"), doc = "(disabled),")] +//! enables Wasmer's [LLVM compiler][wasmer-compiler-llvm], //! - `singlepass` #![cfg_attr(feature = "singlepass", doc = "(enabled),")] #![cfg_attr(not(feature = "singlepass"), doc = "(disabled),")] @@ -307,6 +319,14 @@ //! The features that set defaults come in sets that are mutually exclusive. //! //! The first set is the default compiler set: +//! - `default-cranelift` +#![cfg_attr(feature = "default-cranelift", doc = "(enabled),")] +#![cfg_attr(not(feature = "default-cranelift"), doc = "(disabled),")] +//! set Wasmer's Cranelift compiler as the default, +//! - `default-llvm` +#![cfg_attr(feature = "default-llvm", doc = "(enabled),")] +#![cfg_attr(not(feature = "default-llvm"), doc = "(disabled),")] +//! set Wasmer's LLVM compiler as the default, //! - `default-singlepass` #![cfg_attr(feature = "default-singlepass", doc = "(enabled),")] #![cfg_attr(not(feature = "default-singlepass"), doc = "(disabled),")] @@ -339,6 +359,8 @@ //! [`wasmer-engine-dylib`]: https://docs.rs/wasmer-engine-dylib/ //! [`wasmer-engine-staticlib`]: https://docs.rs/wasmer-engine-staticlib/ //! [`wasmer-compiler-singlepass`]: https://docs.rs/wasmer-compiler-singlepass/ +//! [`wasmer-compiler-llvm`]: https://docs.rs/wasmer-compiler-llvm/ +//! [`wasmer-compiler-cranelift`]: https://docs.rs/wasmer-compiler-cranelift/ //!
[`wasmer-wasi`]: https://docs.rs/wasmer-wasi/ //! [`wasm-pack`]: https://github.com/rustwasm/wasm-pack/ //! [`wasm-bindgen`]: https://github.com/rustwasm/wasm-bindgen diff --git a/lib/api/src/sys/cell.rs b/lib/api/src/sys/cell.rs index 4540c2b305..c7d6bc4dff 100644 --- a/lib/api/src/sys/cell.rs +++ b/lib/api/src/sys/cell.rs @@ -1,5 +1,8 @@ pub use std::cell::Cell; +use core::cmp::Ordering; +use core::fmt::{self, Debug}; + /// A mutable Wasm-memory location. #[repr(transparent)] pub struct WasmCell<'a, T: ?Sized> { @@ -10,6 +13,56 @@ unsafe impl<T: ?Sized> Send for WasmCell<'_, T> where T: Send {} unsafe impl<T: ?Sized> Sync for WasmCell<'_, T> {} +impl<'a, T: Copy> Clone for WasmCell<'a, T> { + #[inline] + fn clone(&self) -> WasmCell<'a, T> { + WasmCell { inner: self.inner } + } +} + +impl<T: PartialEq + Copy> PartialEq for WasmCell<'_, T> { + #[inline] + fn eq(&self, other: &WasmCell<T>) -> bool { + self.inner.eq(&other.inner) + } +} + +impl<T: Eq + Copy> Eq for WasmCell<'_, T> {} + +impl<T: PartialOrd + Copy> PartialOrd for WasmCell<'_, T> { + #[inline] + fn partial_cmp(&self, other: &WasmCell<T>) -> Option<Ordering> { + self.inner.partial_cmp(&other.inner) + } + + #[inline] + fn lt(&self, other: &WasmCell<T>) -> bool { + self.inner < other.inner + } + + #[inline] + fn le(&self, other: &WasmCell<T>) -> bool { + self.inner <= other.inner + } + + #[inline] + fn gt(&self, other: &WasmCell<T>) -> bool { + self.inner > other.inner + } + + #[inline] + fn ge(&self, other: &WasmCell<T>) -> bool { + self.inner >= other.inner + } +} + +impl<T: Ord + Copy> Ord for WasmCell<'_, T> { + #[inline] + fn cmp(&self, other: &WasmCell<T>) -> Ordering { + self.inner.cmp(&other.inner) + } +} + impl<'a, T> WasmCell<'a, T> { /// Creates a new `WasmCell` containing the given value. /// @@ -28,8 +81,66 @@ impl<'a, T> WasmCell<'a, T> { } } +impl<'a, T: Copy> WasmCell<'a, T> { + /// Returns a copy of the contained value. + /// + /// # Examples + /// + /// ``` + /// use std::cell::Cell; + /// use wasmer::WasmCell; + /// + /// let cell = Cell::new(5); + /// let wasm_cell = WasmCell::new(&cell); + /// let five = wasm_cell.get(); + /// ``` + #[inline] + pub fn get(&self) -> T { + self.inner.get() + } + + /// Get an unsafe mutable pointer to the inner item + /// in the Cell. + /// + /// # Safety + /// + /// Use of this method is highly discouraged. We keep it for + /// compatibility reasons with Emscripten. + /// It is unsafe because changing an item inline will change + /// the underlying memory. + /// + /// It's highly encouraged to use the `set` method instead. + #[deprecated( + since = "2.0.0", + note = "Please use the memory-safe set method instead" + )] + #[doc(hidden)] + pub unsafe fn get_mut(&self) -> &'a mut T { + &mut *self.inner.as_ptr() + } +} + +impl<T: Debug + Copy> Debug for WasmCell<'_, T> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "WasmCell({:?})", self.inner.get()) + } +} + impl<T> WasmCell<'_, T> { /// Sets the contained value.
+ /// + /// # Examples + /// + /// ``` + /// use std::cell::Cell; + /// use wasmer::WasmCell; + /// + /// let cell = Cell::new(5); + /// let wasm_cell = WasmCell::new(&cell); + /// wasm_cell.set(10); + /// assert_eq!(cell.get(), 10); + /// ``` #[inline] pub fn set(&self, val: T) { self.inner.set(val); diff --git a/lib/api/src/sys/env.rs b/lib/api/src/sys/env.rs index 28e35d7815..dc5e0e3341 100644 --- a/lib/api/src/sys/env.rs +++ b/lib/api/src/sys/env.rs @@ -27,6 +27,43 @@ impl From<ExportError> for HostEnvInitError { /// /// # Examples /// +/// This trait can be derived like so: +/// +/// ``` +/// use wasmer::{WasmerEnv, LazyInit, Memory, NativeFunc}; +/// +/// #[derive(WasmerEnv, Clone)] +/// pub struct MyEnvWithNoInstanceData { +/// non_instance_data: u8, +/// } +/// +/// #[derive(WasmerEnv, Clone)] +/// pub struct MyEnvWithInstanceData { +/// non_instance_data: u8, +/// #[wasmer(export)] +/// memory: LazyInit<Memory>, +/// #[wasmer(export(name = "real_name"))] +/// func: LazyInit<NativeFunc<(i32, i32), i32>>, +/// #[wasmer(export(optional = true, alias = "memory2", alias = "_memory2"))] +/// optional_memory: LazyInit<Memory>, +/// } +/// +/// ``` +/// +/// When deriving `WasmerEnv`, you must wrap your types to be initialized in +/// [`LazyInit`]. The derive macro will also generate helper methods of the form +/// `<field_name>_ref` and `<field_name>_ref_unchecked` for easy access to the +/// data. +/// +/// The valid arguments to `export` are: +/// - `name = "string"`: specify the name of this item in the Wasm module. If this is not specified, it will default to the name of the field. +/// - `optional = true`: specify whether this export is optional. Defaults to +/// `false`. Being optional means that if the export can't be found, the +/// [`LazyInit`] will be left uninitialized. +/// - `alias = "string"`: specify additional names to look for in the Wasm module. +/// `alias` may be specified multiple times to search for multiple aliases. +/// ------- +/// /// This trait may also be implemented manually: /// ``` /// # use wasmer::{WasmerEnv, LazyInit, Memory, Instance, HostEnvInitError}; /// /// impl WasmerEnv for MyEnv { /// fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> { -/// todo!() +/// let memory: Memory = instance.get_with_generics_weak("memory").unwrap(); +/// self.memory.initialize(memory.clone()); +/// Ok(()) /// } /// } /// ``` diff --git a/lib/api/src/sys/exports.rs b/lib/api/src/sys/exports.rs index e8ddf81616..79f69cd859 100644 --- a/lib/api/src/sys/exports.rs +++ b/lib/api/src/sys/exports.rs @@ -1,6 +1,10 @@ -use crate::sys::externals::Extern; +use crate::sys::externals::{Extern, Function, Global, Memory, Table}; use crate::sys::import_object::LikeNamespace; +use crate::sys::native::NativeFunc; +use crate::sys::WasmTypeList; use indexmap::IndexMap; +use std::fmt; +use std::iter::{ExactSizeIterator, FromIterator}; use std::sync::Arc; use thiserror::Error; use wasmer_vm::Export; @@ -68,6 +72,23 @@ impl Exports { Default::default() } + /// Creates a new `Exports` with capacity `n`. + pub fn with_capacity(n: usize) -> Self { + Self { + map: Arc::new(IndexMap::with_capacity(n)), + } + } + + /// Return the number of exports in the `Exports` map. + pub fn len(&self) -> usize { + self.map.len() + } + + /// Return whether the `Exports` map contains no exports. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + /// Insert a new export into this `Exports` map.
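+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (the global and its name are illustrative, not part
+    /// of this patch):
+    ///
+    /// ```
+    /// # use wasmer::{Exports, Global, Store, Value};
+    /// # let store = Store::default();
+    /// let mut exports = Exports::new();
+    /// exports.insert("counter", Global::new(&store, Value::I32(0)));
+    /// assert!(exports.contains("counter"));
+    /// assert_eq!(exports.len(), 1);
+    /// ```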
pub fn insert<S, E>(&mut self, name: S, value: E) where @@ -78,6 +99,183 @@ impl Exports { .unwrap() .insert(name.into(), value.into()); } + + /// Get an export given a `name`. + /// + /// The `get` method is specifically made for usage inside of + /// Rust APIs, as we can detect the desired type easily. + /// + /// If you want to get an export dynamically with type checking, + /// please use the following functions: `get_func`, `get_memory`, + /// `get_table` or `get_global` instead. + /// + /// If you want to get an export dynamically and handle type + /// checking manually, please use `get_extern`. + pub fn get<'a, T: Exportable<'a>>(&'a self, name: &str) -> Result<T, ExportError> { + match self.map.get(name) { + None => Err(ExportError::Missing(name.to_string())), + Some(extern_) => T::get_self_from_extern(extern_.clone()), + } + } + + /// Get an export as a `Global`. + pub fn get_global(&self, name: &str) -> Result<Global, ExportError> { + self.get(name) + } + + /// Get an export as a `Memory`. + pub fn get_memory(&self, name: &str) -> Result<Memory, ExportError> { + self.get(name) + } + + /// Get an export as a `Table`. + pub fn get_table(&self, name: &str) -> Result<Table, ExportError> { + self.get(name) + } + + /// Get an export as a `Func`. + pub fn get_function(&self, name: &str) -> Result<Function, ExportError> { + self.get(name) + } + + /// Get an export as a `NativeFunc`. + pub fn get_native_function<Args, Rets>( + &self, + name: &str, + ) -> Result<NativeFunc<Args, Rets>, ExportError> + where + Args: WasmTypeList, + Rets: WasmTypeList, + { + self.get_function(name)? + .native() + .map_err(|_| ExportError::IncompatibleType) + } + + /// Like `get`, but with generic `Args`/`Rets` parameters so that a + /// `NativeFunc` can be retrieved as well. + pub fn get_with_generics<'a, T, Args, Rets>(&'a self, name: &str) -> Result<T, ExportError> + where + Args: WasmTypeList, + Rets: WasmTypeList, + T: ExportableWithGenerics<'a, Args, Rets>, + { + match self.map.get(name) { + None => Err(ExportError::Missing(name.to_string())), + Some(extern_) => T::get_self_from_extern_with_generics(extern_.clone()), + } + } + + /// Like `get_with_generics` but with a WeakReference to the `InstanceRef` internally. + /// This is useful for passing data into `WasmerEnv`, for example. + pub fn get_with_generics_weak<'a, T, Args, Rets>(&'a self, name: &str) -> Result<T, ExportError> + where + Args: WasmTypeList, + Rets: WasmTypeList, + T: ExportableWithGenerics<'a, Args, Rets>, + { + let mut out: T = self.get_with_generics(name)?; + out.into_weak_instance_ref(); + Ok(out) + } + + /// Get an export as an `Extern`. + pub fn get_extern(&self, name: &str) -> Option<&Extern> { + self.map.get(name) + } + + /// Returns true if the `Exports` contains the given export name. + pub fn contains<S>(&self, name: S) -> bool + where + S: Into<String>, + { + self.map.contains_key(&name.into()) + } + + /// Get an iterator over the exports. + pub fn iter(&self) -> ExportsIterator<impl Iterator<Item = (&String, &Extern)>> { + ExportsIterator { + iter: self.map.iter(), + } + } +} + +impl fmt::Debug for Exports { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_set().entries(self.iter()).finish() + } } + +/// An iterator over exports. +pub struct ExportsIterator<'a, I> +where + I: Iterator<Item = (&'a String, &'a Extern)> + Sized, +{ + iter: I, +} + +impl<'a, I> Iterator for ExportsIterator<'a, I> +where + I: Iterator<Item = (&'a String, &'a Extern)> + Sized, +{ + type Item = (&'a String, &'a Extern); + + fn next(&mut self) -> Option<Self::Item> { + self.iter.next() + } +} + +impl<'a, I> ExactSizeIterator for ExportsIterator<'a, I> +where + I: Iterator<Item = (&'a String, &'a Extern)> + ExactSizeIterator + Sized, +{ + fn len(&self) -> usize { + self.iter.len() + } +} + +impl<'a, I> ExportsIterator<'a, I> +where + I: Iterator<Item = (&'a String, &'a Extern)> + Sized, +{ + /// Get only the functions.
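+    ///
+    /// An illustrative sketch (assumes `exports` is an `Exports` map that
+    /// already holds some functions):
+    ///
+    /// ```ignore
+    /// for (name, function) in exports.iter().functions() {
+    ///     println!("{}: {:?}", name, function.ty());
+    /// }
+    /// ```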
+ pub fn functions(self) -> impl Iterator<Item = (&'a String, &'a Function)> + Sized { + self.iter.filter_map(|(name, export)| match export { + Extern::Function(function) => Some((name, function)), + _ => None, + }) + } + + /// Get only the memories. + pub fn memories(self) -> impl Iterator<Item = (&'a String, &'a Memory)> + Sized { + self.iter.filter_map(|(name, export)| match export { + Extern::Memory(memory) => Some((name, memory)), + _ => None, + }) + } + + /// Get only the globals. + pub fn globals(self) -> impl Iterator<Item = (&'a String, &'a Global)> + Sized { + self.iter.filter_map(|(name, export)| match export { + Extern::Global(global) => Some((name, global)), + _ => None, + }) + } + + /// Get only the tables. + pub fn tables(self) -> impl Iterator<Item = (&'a String, &'a Table)> + Sized { + self.iter.filter_map(|(name, export)| match export { + Extern::Table(table) => Some((name, table)), + _ => None, + }) + } +} + +impl FromIterator<(String, Extern)> for Exports { + fn from_iter<I: IntoIterator<Item = (String, Extern)>>(iter: I) -> Self { + Self { + map: Arc::new(IndexMap::from_iter(iter)), + } + } } impl LikeNamespace for Exports { @@ -91,6 +289,10 @@ .map(|(k, v)| (k.clone(), v.to_export())) .collect() } + + fn as_exports(&self) -> Option<Exports> { + Some(self.clone()) + } } /// This trait is used to mark types as gettable from an [`Instance`]. @@ -102,4 +304,39 @@ pub trait Exportable<'a>: Sized { /// /// [`Module`]: crate::Module fn to_export(&self) -> Export; + + /// Implementation of how to get the export corresponding to the implementing type + /// from an [`Instance`] by name. + /// + /// [`Instance`]: crate::Instance + fn get_self_from_extern(_extern: Extern) -> Result<Self, ExportError>; + + /// Convert the extern internally to hold a weak reference to the `InstanceRef`. + /// This is useful for preventing cycles, for example for data stored in a + /// type implementing `WasmerEnv`. + fn into_weak_instance_ref(&mut self); +} + +/// A trait for accessing exports (like [`Exportable`]) but it takes generic +/// `Args` and `Rets` parameters so that `NativeFunc` can be accessed directly +/// as well. +pub trait ExportableWithGenerics<'a, Args: WasmTypeList, Rets: WasmTypeList>: Sized { + /// Get an export with the given generics. + fn get_self_from_extern_with_generics(_extern: Extern) -> Result<Self, ExportError>; + /// Convert the extern internally to hold a weak reference to the `InstanceRef`. + /// This is useful for preventing cycles, for example for data stored in a + /// type implementing `WasmerEnv`. + fn into_weak_instance_ref(&mut self); +} + +/// We implement it for all concrete [`Exportable`] types (that are `Clone`) +/// with empty `Args` and `Rets`.
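+///
+/// This blanket impl is what allows plain externs to be fetched through the
+/// generic accessors too, e.g. (illustrative, assuming the module has a
+/// `memory` export):
+///
+/// ```ignore
+/// let memory: Memory = instance.get_with_generics_weak("memory").unwrap();
+/// ```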
+impl<'a, T: Exportable<'a> + Clone + 'static> ExportableWithGenerics<'a, (), ()> for T { + fn get_self_from_extern_with_generics(_extern: Extern) -> Result<Self, ExportError> { + T::get_self_from_extern(_extern).map(|i| i.clone()) + } + + fn into_weak_instance_ref(&mut self) { + <Self as Exportable>::into_weak_instance_ref(self); + } } diff --git a/lib/api/src/sys/externals/function.rs b/lib/api/src/sys/externals/function.rs index 8e1c64ef29..be750369eb 100644 --- a/lib/api/src/sys/externals/function.rs +++ b/lib/api/src/sys/externals/function.rs @@ -1,4 +1,5 @@ -use crate::sys::exports::Exportable; +use crate::sys::exports::{ExportError, Exportable}; +use crate::sys::externals::Extern; use crate::sys::store::Store; use crate::sys::types::{Val, ValFuncRef}; use crate::sys::FunctionType; @@ -13,7 +14,7 @@ use std::fmt; use std::sync::Arc; use wasmer_vm::{ raise_user_trap, resume_panic, wasmer_call_trampoline, Export, ExportFunction, - ExportFunctionMetadata, ImportInitializerFuncPtr, TableElement, VMCallerCheckedAnyfunc, + ExportFunctionMetadata, ImportInitializerFuncPtr, VMCallerCheckedAnyfunc, VMDynamicFunctionContext, VMFuncRef, VMFunction, VMFunctionBody, VMFunctionEnvironment, VMFunctionKind, VMTrampoline, }; @@ -65,49 +66,6 @@ impl wasmer_types::WasmValueType for Function { } } -impl Function { - /// Convert a `VMFuncRef` into a `Function`. - /// - /// Returns `None` if the funcref is null. - /// - /// # Safety - /// - /// Must ensure that the returned Function does not outlive the containing instance. - pub unsafe fn from_vm_funcref(store: &Store, func_ref: VMFuncRef) -> Option<Self> { - if func_ref.is_null() { - return None; - } - let wasmer_vm::VMCallerCheckedAnyfunc { - func_ptr: address, - type_index: signature, - vmctx, - } = **func_ref; - let export = wasmer_vm::ExportFunction { - // TODO: - // figure out if we ever need a value here: need testing with complicated import patterns - metadata: None, - vm_function: wasmer_vm::VMFunction { - address, - signature, - // TODO: review this comment (unclear if it's still correct): - // All functions in tables are already Static (as dynamic functions - // are converted to use the trampolines with static signatures).
- kind: wasmer_vm::VMFunctionKind::Static, - vmctx, - call_trampoline: None, - instance_ref: None, - }, - }; - Some(Function::from_vm_export(store, export)) - } -} - -impl From<Function> for TableElement { - fn from(f: Function) -> Self { - TableElement::FuncRef(f.vm_funcref()) - } -} - fn build_export_function_metadata<Env>( env: Env, import_init_function_ptr: for<'a> fn( @@ -207,11 +165,10 @@ impl Function { /// # use wasmer::{Function, FunctionType, Type, Store, Value, WasmerEnv}; /// # let store = Store::default(); /// # - /// #[derive(Clone)] + /// #[derive(WasmerEnv, Clone)] /// struct Env { /// multiplier: i32, /// }; - /// impl WasmerEnv for Env {} /// let env = Env { multiplier: 2 }; /// /// let signature = FunctionType::new(vec![Type::I32, Type::I32], vec![Type::I32]); @@ -229,11 +186,10 @@ /// # let store = Store::default(); /// const I32_I32_TO_I32: ([Type; 2], [Type; 1]) = ([Type::I32, Type::I32], [Type::I32]); /// - /// #[derive(Clone)] + /// #[derive(WasmerEnv, Clone)] /// struct Env { /// multiplier: i32, /// }; - /// impl WasmerEnv for Env {} /// let env = Env { multiplier: 2 }; /// /// let f = Function::new_with_env(&store, I32_I32_TO_I32, env, |env, args| { @@ -275,7 +231,7 @@ let signature = store .engine() // TODO(0-copy): - .register_signature(ty); + .register_signature((&ty).into()); Self { store: store.clone(), @@ -328,7 +284,7 @@ let signature = store .engine() // TODO(0-copy): - .register_signature(function.ty()); + .register_signature((&function.ty()).into()); Self { store: store.clone(), @@ -359,11 +315,10 @@ /// # use wasmer::{Store, Function, WasmerEnv}; /// # let store = Store::default(); /// # - /// #[derive(Clone)] + /// #[derive(WasmerEnv, Clone)] /// struct Env { /// multiplier: i32, /// }; - /// impl WasmerEnv for Env {} /// let env = Env { multiplier: 2 }; /// /// fn sum_and_multiply(env: &Env, a: i32, b: i32) -> i32 { @@ -389,7 +344,7 @@ build_export_function_metadata::<Env>(env, Env::init_with_instance); let vmctx = VMFunctionEnvironment { host_env }; - let signature = store.engine().register_signature(function.ty()); + let signature = store.engine().register_signature((&function.ty()).into()); Self { store: store.clone(), exported: ExportFunction { @@ -740,6 +695,21 @@ fn to_export(&self) -> Export { self.exported.clone().into() } + + fn get_self_from_extern(_extern: Extern) -> Result<Self, ExportError> { + match _extern { + Extern::Function(func) => Ok(func), + _ => Err(ExportError::IncompatibleType), + } + } + + fn into_weak_instance_ref(&mut self) { + self.exported + .vm_function + .instance_ref + .as_mut() + .map(|v| *v = v.downgrade()); + } } impl Clone for Function { @@ -877,6 +847,9 @@ mod inner { use std::error::Error; use std::marker::PhantomData; use std::panic::{self, AssertUnwindSafe}; + + #[cfg(feature = "experimental-reference-types-extern-ref")] + pub use wasmer_types::{ExternRef, VMExternRef}; use wasmer_types::{FunctionType, NativeWasmType, Type}; use wasmer_vm::{raise_user_trap, resume_panic, VMFunctionBody}; @@ -969,6 +942,18 @@ f64 => f64 ); + #[cfg(feature = "experimental-reference-types-extern-ref")] + unsafe impl FromToNativeWasmType for ExternRef { + type Native = VMExternRef; + + fn to_native(self) -> Self::Native { + self.into() + } + fn from_native(n: Self::Native) -> Self { + n.into() + } + } + #[cfg(test)] mod test_from_to_native_wasm_type { use super::*; diff --git a/lib/api/src/sys/externals/global.rs
b/lib/api/src/sys/externals/global.rs index 23dc09a03a..2128361236 100644 --- a/lib/api/src/sys/externals/global.rs +++ b/lib/api/src/sys/externals/global.rs @@ -1,4 +1,5 @@ -use crate::sys::exports::Exportable; +use crate::sys::exports::{ExportError, Exportable}; +use crate::sys::externals::Extern; use crate::sys::store::{Store, StoreObject}; use crate::sys::types::Val; use crate::sys::GlobalType; @@ -188,6 +189,34 @@ impl Global { vm_global, } } + + /// Returns whether or not these two globals refer to the same data. + /// + /// # Example + /// + /// ``` + /// # use wasmer::{Global, Store, Value}; + /// # let store = Store::default(); + /// # + /// let g = Global::new(&store, Value::I32(1)); + /// + /// assert!(g.same(&g)); + /// ``` + pub fn same(&self, other: &Self) -> bool { + Arc::ptr_eq(&self.vm_global.from, &other.vm_global.from) + } + + /// Get access to the backing VM value for this extern. This function is for + /// tests; it should not be called by users of the Wasmer API. + /// + /// # Safety + /// This function is unsafe to call outside of tests for the wasmer crate + /// because there is no stability guarantee for the returned type and we may + /// make breaking changes to it at any time or remove this method. + #[doc(hidden)] + pub unsafe fn get_vm_global(&self) -> &VMGlobal { + &self.vm_global + } } impl Clone for Global { @@ -216,4 +245,18 @@ fn to_export(&self) -> Export { self.vm_global.clone().into() } + + fn get_self_from_extern(_extern: Extern) -> Result<Self, ExportError> { + match _extern { + Extern::Global(global) => Ok(global), + _ => Err(ExportError::IncompatibleType), + } + } + + fn into_weak_instance_ref(&mut self) { + self.vm_global + .instance_ref + .as_mut() + .map(|v| *v = v.downgrade()); + } } diff --git a/lib/api/src/sys/externals/memory.rs b/lib/api/src/sys/externals/memory.rs index d84884bd6b..270addb97b 100644 --- a/lib/api/src/sys/externals/memory.rs +++ b/lib/api/src/sys/externals/memory.rs @@ -1,8 +1,10 @@ -use crate::sys::exports::Exportable; +use crate::sys::exports::{ExportError, Exportable}; +use crate::sys::externals::Extern; use crate::sys::store::Store; use crate::sys::{MemoryType, MemoryView}; use std::convert::TryInto; use std::slice; +use std::sync::Arc; use wasmer_types::{Pages, ValueType}; use wasmer_vm::{Export, MemoryError, VMMemory}; @@ -56,14 +58,6 @@ impl Memory { }) } - /// Create a `Memory` from `VMMemory`. - pub fn from_vmmemory(store: &Store, vm_memory: VMMemory) -> Self { - Self { - store: store.clone(), - vm_memory, - } - } - /// Returns the [`MemoryType`] of the `Memory`. /// /// # Example /// @@ -154,6 +148,43 @@ self.vm_memory.from.size() } + /// Grow memory by the specified amount of WebAssembly [`Pages`] and return + /// the previous memory size. + /// + /// # Example + /// + /// ``` + /// # use wasmer::{Memory, MemoryType, Pages, Store, Type, Value, WASM_MAX_PAGES}; + /// # let store = Store::default(); + /// # + /// let m = Memory::new(&store, MemoryType::new(1, Some(3), false)).unwrap(); + /// let p = m.grow(2).unwrap(); + /// + /// assert_eq!(p, Pages(1)); + /// assert_eq!(m.size(), Pages(3)); + /// ``` + /// + /// # Errors + /// + /// Returns an error if memory can't be grown by the specified amount + /// of pages.
+ /// + /// ```should_panic + /// # use wasmer::{Memory, MemoryType, Pages, Store, Type, Value, WASM_MAX_PAGES}; + /// # let store = Store::default(); + /// # + /// let m = Memory::new(&store, MemoryType::new(1, Some(1), false)).unwrap(); + /// + /// // This results in an error: `MemoryError::CouldNotGrow`. + /// let s = m.grow(1).unwrap(); + /// ``` + pub fn grow<IntoPages>(&self, delta: IntoPages) -> Result<Pages, MemoryError> + where + IntoPages: Into<Pages>, + { + self.vm_memory.from.grow(delta.into()) + } + /// Return a "view" of the currently accessible memory. By /// default, the view is unsynchronized, using regular memory /// accesses. You can force a memory view to use atomic accesses /// @@ -193,6 +224,14 @@ unsafe { MemoryView::new(base as _, length as u32) } } + /// A shortcut to [`Self::view::<u8>`][self::view]. + /// + /// This code is going to be refactored. Use it at your own risk. + #[doc(hidden)] + pub fn uint8view(&self) -> MemoryView<u8> { + self.view() + } + pub(crate) fn from_vm_export(store: &Store, vm_memory: VMMemory) -> Self { Self { store: store.clone(), @@ -200,6 +239,22 @@ } } + /// Returns whether or not these two memories refer to the same data. + /// + /// # Example + /// + /// ``` + /// # use wasmer::{Memory, MemoryType, Store, Value}; + /// # let store = Store::default(); + /// # + /// let m = Memory::new(&store, MemoryType::new(1, None, false)).unwrap(); + /// + /// assert!(m.same(&m)); + /// ``` + pub fn same(&self, other: &Self) -> bool { + Arc::ptr_eq(&self.vm_memory.from, &other.vm_memory.from) + } + /// Get access to the backing VM value for this extern. This function is for /// tests; it should not be called by users of the Wasmer API. /// @@ -229,4 +284,18 @@ fn to_export(&self) -> Export { self.vm_memory.clone().into() } + + fn get_self_from_extern(_extern: Extern) -> Result<Self, ExportError> { + match _extern { + Extern::Memory(memory) => Ok(memory), + _ => Err(ExportError::IncompatibleType), + } + } + + fn into_weak_instance_ref(&mut self) { + self.vm_memory + .instance_ref + .as_mut() + .map(|v| *v = v.downgrade()); + } } diff --git a/lib/api/src/sys/externals/mod.rs b/lib/api/src/sys/externals/mod.rs index 2459a53758..e43919cb19 100644 --- a/lib/api/src/sys/externals/mod.rs +++ b/lib/api/src/sys/externals/mod.rs @@ -11,8 +11,9 @@ pub use self::global::Global; pub use self::memory::Memory; pub use self::table::Table; -use crate::sys::exports::Exportable; +use crate::sys::exports::{ExportError, Exportable}; use crate::sys::store::{Store, StoreObject}; +use crate::sys::ExternType; use std::fmt; use wasmer_vm::Export; @@ -33,6 +34,16 @@ pub enum Extern { } impl Extern { + /// Return the underlying type of the inner `Extern`. + pub fn ty(&self) -> ExternType { + match self { + Self::Function(ft) => ExternType::Function(ft.ty().clone()), + Self::Memory(ft) => ExternType::Memory(ft.ty()), + Self::Table(tt) => ExternType::Table(*tt.ty()), + Self::Global(gt) => ExternType::Global(*gt.ty()), + } + } + /// Create an `Extern` from a `wasmer_engine::Export`. pub fn from_vm_export(store: &Store, export: Export) -> Self { match export { @@ -53,6 +64,20 @@ Self::Table(t) => t.to_export(), } } + + fn get_self_from_extern(_extern: Self) -> Result<Self, ExportError> { + // Since this is already an extern, we can just return it.
+ Ok(_extern) + } + + fn into_weak_instance_ref(&mut self) { + match self { + Self::Function(f) => f.into_weak_instance_ref(), + Self::Global(g) => g.into_weak_instance_ref(), + Self::Memory(m) => m.into_weak_instance_ref(), + Self::Table(t) => t.into_weak_instance_ref(), + } + } } impl StoreObject for Extern { diff --git a/lib/api/src/sys/externals/table.rs b/lib/api/src/sys/externals/table.rs index e2afc88a9d..cb7d4753b5 100644 --- a/lib/api/src/sys/externals/table.rs +++ b/lib/api/src/sys/externals/table.rs @@ -1,4 +1,5 @@ -use crate::sys::exports::Exportable; +use crate::sys::exports::{ExportError, Exportable}; +use crate::sys::externals::Extern; use crate::sys::store::Store; use crate::sys::types::{Val, ValFuncRef}; use crate::sys::RuntimeError; @@ -67,11 +68,70 @@ impl Table { &self.store } + /// Retrieves an element of the table at the provided `index`. + pub fn get(&self, index: u32) -> Option<Val> { + let item = self.vm_table.from.get(index)?; + Some(ValFuncRef::from_table_reference(item, &self.store)) + } + + /// Sets an element `val` in the Table at the provided `index`. + pub fn set(&self, index: u32, val: Val) -> Result<(), RuntimeError> { + let item = val.into_table_reference(&self.store)?; + set_table_item(self.vm_table.from.as_ref(), index, item) + } + /// Retrieves the size of the `Table` (in elements) pub fn size(&self) -> u32 { self.vm_table.from.size() } + /// Grows the size of the `Table` by `delta`, initializing + /// the elements with the provided `init` value. + /// + /// It returns the previous size of the `Table` if it is able + /// to grow the table successfully. + /// + /// # Errors + /// + /// Returns an error if the `delta` is out of bounds for the table. + pub fn grow(&self, delta: u32, init: Val) -> Result<u32, RuntimeError> { + let item = init.into_table_reference(&self.store)?; + self.vm_table + .from + .grow(delta, item) + .ok_or_else(|| RuntimeError::new(format!("failed to grow table by `{}`", delta))) + } + + /// Copies the `len` elements of `src_table` starting at `src_index` + /// to the destination table `dst_table` at index `dst_index`. + /// + /// # Errors + /// + /// Returns an error if the range is out of bounds of either the source or + /// destination tables. + pub fn copy( + dst_table: &Self, + dst_index: u32, + src_table: &Self, + src_index: u32, + len: u32, + ) -> Result<(), RuntimeError> { + if !Store::same(&dst_table.store, &src_table.store) { + return Err(RuntimeError::new( + "cross-`Store` table copies are not supported", + )); + } + RuntimeTable::copy( + dst_table.vm_table.from.as_ref(), + src_table.vm_table.from.as_ref(), + dst_index, + src_index, + len, + ) + .map_err(RuntimeError::from_trap)?; + Ok(()) + } + pub(crate) fn from_vm_export(store: &Store, vm_table: VMTable) -> Self { Self { store: store.clone(), @@ -113,4 +173,18 @@ fn to_export(&self) -> Export { self.vm_table.clone().into() } + + fn get_self_from_extern(_extern: Extern) -> Result<Self, ExportError> { + match _extern { + Extern::Table(table) => Ok(table), + _ => Err(ExportError::IncompatibleType), + } + } + + fn into_weak_instance_ref(&mut self) { + self.vm_table + .instance_ref + .as_mut() + .map(|v| *v = v.downgrade()); + } } diff --git a/lib/api/src/sys/import_object.rs b/lib/api/src/sys/import_object.rs index 3b7067e5db..2568b01a29 100644 --- a/lib/api/src/sys/import_object.rs +++ b/lib/api/src/sys/import_object.rs @@ -1,6 +1,7 @@ //! The import module contains the implementation data structures and helper functions used to //!
manipulate and access a wasm module's imports including memories, tables, globals, and //! functions. +use crate::Exports; use std::borrow::{Borrow, BorrowMut}; use std::collections::VecDeque; use std::collections::{hash_map::Entry, HashMap}; @@ -16,6 +17,12 @@ pub trait LikeNamespace { fn get_namespace_export(&self, name: &str) -> Option<Export>; /// Gets all exports in the namespace. fn get_namespace_exports(&self) -> Vec<(String, Export)>; + /// Returns the contents of this namespace as an `Exports`. + /// + /// This is used by `ImportObject::get_namespace_exports`. + fn as_exports(&self) -> Option<Exports> { + None + } } /// All of the import data used when instantiating. @@ -101,6 +108,15 @@ impl ImportObject { } } + /// Returns the contents of a namespace as an `Exports`. + /// + /// Returns `None` if the namespace doesn't exist or doesn't implement the + /// `as_exports` method. + pub fn get_namespace_exports(&self, name: &str) -> Option<Exports> { + let map = self.map.lock().unwrap(); + map.get(name).and_then(|ns| ns.as_exports()) + } + fn get_objects(&self) -> VecDeque<((String, String), Export)> { let mut out = VecDeque::new(); let guard = self.map.lock().unwrap(); diff --git a/lib/api/src/sys/instance.rs b/lib/api/src/sys/instance.rs index 68cd84444e..6d372c0db3 100644 --- a/lib/api/src/sys/instance.rs +++ b/lib/api/src/sys/instance.rs @@ -1,10 +1,14 @@ use crate::sys::module::Module; +use crate::sys::store::Store; use crate::sys::{HostEnvInitError, LinkError, RuntimeError}; use crate::{ExportError, NativeFunc, WasmTypeList}; +use std::fmt; use std::sync::{Arc, Mutex}; use thiserror::Error; use wasmer_types::InstanceConfig; -use wasmer_vm::{InstanceHandle, Resolver}; +use wasmer_vm::{InstanceHandle, Resolver, VMContext}; + +use super::exports::ExportableWithGenerics; /// A WebAssembly Instance is a stateful, executable /// instance of a WebAssembly [`Module`]. @@ -117,12 +121,19 @@ impl Instance { } /// New instance with config. - #[tracing::instrument(skip_all)] pub fn new_with_config( module: &Module, config: InstanceConfig, resolver: &dyn Resolver, ) -> Result<Self, InstantiationError> { + unsafe { + if (*config.gas_counter).opcode_cost > i32::MAX as u64 { + // Fast gas counter logic assumes that individual opcode cost is not too big. + return Err(InstantiationError::HostEnvInitialization( + HostEnvInitError::IncorrectGasMeteringConfig, + )); + } + } let handle = module.instantiate(resolver, config)?; let instance = Self { handle: Arc::new(Mutex::new(handle)), @@ -147,6 +158,16 @@ Ok(instance) } + /// Gets the [`Module`] associated with this instance. + pub fn module(&self) -> &Module { + &self.module + } + + /// Returns the [`Store`] where the `Instance` belongs. + pub fn store(&self) -> &Store { + self.module.store() + } + /// Lookup an exported entity by its name. pub fn lookup(&self, field: &str) -> Option<crate::Export> { let vmextern = self.handle.lock().unwrap().lookup(field)?; @@ -156,7 +177,7 @@ /// Lookup an exported function by its name. pub fn lookup_function(&self, field: &str) -> Option<crate::Function> { if let crate::Export::Function(f) = self.lookup(field)?
{ - Some(crate::Function::from_vm_export(self.module.store(), f)) + Some(crate::Function::from_vm_export(self.store(), f)) } else { None } @@ -172,19 +193,49 @@ Rets: WasmTypeList, { match self.lookup(name) { - Some(crate::Export::Function(f)) => { - crate::Function::from_vm_export(self.module.store(), f) - .native() - .map_err(|_| ExportError::IncompatibleType) - } + Some(crate::Export::Function(f)) => crate::Function::from_vm_export(self.store(), f) + .native() + .map_err(|_| ExportError::IncompatibleType), Some(_) => Err(ExportError::IncompatibleType), None => Err(ExportError::Missing("not found".into())), } } - // Used internally by wast only + /// Like `lookup`, but with generic `Args`/`Rets` parameters so that a + /// `NativeFunc` can be retrieved as well. + pub fn get_with_generics<'a, T, Args, Rets>(&'a self, name: &str) -> Result<T, ExportError> + where + Args: WasmTypeList, + Rets: WasmTypeList, + T: ExportableWithGenerics<'a, Args, Rets>, + { + let export = self + .lookup(name) + .ok_or_else(|| ExportError::Missing(name.to_string()))?; + let ext = crate::Extern::from_vm_export(self.store(), export); + T::get_self_from_extern_with_generics(ext) + } + + /// Like `get_with_generics` but with a WeakReference to the `InstanceRef` internally. + /// This is useful for passing data into `WasmerEnv`, for example. + pub fn get_with_generics_weak<'a, T, Args, Rets>(&'a self, name: &str) -> Result<T, ExportError> + where + Args: WasmTypeList, + Rets: WasmTypeList, + T: ExportableWithGenerics<'a, Args, Rets>, + { + let mut out: T = self.get_with_generics(name)?; + out.into_weak_instance_ref(); + Ok(out) + } + #[doc(hidden)] - pub fn handle(&self) -> std::sync::MutexGuard<'_, InstanceHandle> { - self.handle.lock().unwrap() + pub fn vmctx_ptr(&self) -> *mut VMContext { + self.handle.lock().unwrap().vmctx_ptr() + } +} + +impl fmt::Debug for Instance { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Instance").finish() } } diff --git a/lib/api/src/sys/mod.rs b/lib/api/src/sys/mod.rs index 70b9289187..2fb4e004a6 100644 --- a/lib/api/src/sys/mod.rs +++ b/lib/api/src/sys/mod.rs @@ -10,6 +10,12 @@ mod ptr; mod store; mod tunables; mod types; +mod utils; + +/// Implement [`WasmerEnv`] for your type with `#[derive(WasmerEnv)]`. +/// +/// See the [`WasmerEnv`] trait for more information.
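+///
+/// A small sketch of the derive in use (field and export names are
+/// illustrative):
+///
+/// ```
+/// use wasmer::{WasmerEnv, LazyInit, Memory};
+///
+/// #[derive(WasmerEnv, Clone)]
+/// struct MyEnv {
+///     #[wasmer(export)]
+///     memory: LazyInit<Memory>,
+/// }
+/// ```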
+pub use wasmer_derive::WasmerEnv; #[doc(hidden)] pub mod internals { @@ -23,7 +29,7 @@ pub mod internals { pub use crate::sys::cell::WasmCell; pub use crate::sys::env::{HostEnvInitError, LazyInit, WasmerEnv}; -pub use crate::sys::exports::{ExportError, Exportable, Exports}; +pub use crate::sys::exports::{ExportError, Exportable, Exports, ExportsIterator}; pub use crate::sys::externals::{ Extern, FromToNativeWasmType, Function, Global, HostFunction, Memory, Table, WasmTypeList, }; @@ -39,6 +45,7 @@ pub use crate::sys::types::{ ValType, }; pub use crate::sys::types::{Val as Value, ValType as Type}; +pub use crate::sys::utils::is_wasm; pub use target_lexicon::{Architecture, CallingConvention, OperatingSystem, Triple, HOST}; #[cfg(feature = "compiler")] pub use wasmer_compiler::{wasmparser, CompilerConfig}; @@ -46,9 +53,11 @@ pub use wasmer_compiler::{ CompileError, CpuFeature, Features, ParseCpuFeatureError, Target, WasmError, WasmResult, }; pub use wasmer_engine::{DeserializeError, Engine, FrameInfo, LinkError, RuntimeError}; +#[cfg(feature = "experimental-reference-types-extern-ref")] +pub use wasmer_types::ExternRef; pub use wasmer_types::{ - Atomically, Bytes, ExportIndex, ExternRef, GlobalInit, LocalFunctionIndex, MemoryView, Pages, - ValueType, WASM_MAX_PAGES, WASM_MIN_PAGES, WASM_PAGE_SIZE, + Atomically, Bytes, ExportIndex, GlobalInit, LocalFunctionIndex, MemoryView, Pages, ValueType, + WASM_MAX_PAGES, WASM_MIN_PAGES, WASM_PAGE_SIZE, }; pub use wasmer_vm::{ ChainableNamedResolver, Export, NamedResolver, NamedResolverChain, Resolver, Tunables, @@ -68,9 +77,35 @@ pub mod vm { #[cfg(feature = "wat")] pub use wat::parse_bytes as wat2wasm; +// The compilers are mutually exclusive +#[cfg(any( + all( + feature = "default-llvm", + any(feature = "default-cranelift", feature = "default-singlepass") + ), + all(feature = "default-cranelift", feature = "default-singlepass") +))] +compile_error!( + r#"The `default-singlepass`, `default-cranelift` and `default-llvm` features are mutually exclusive. +If you wish to use more than one compiler, you can simply create your own store. E.g.: + +``` +use wasmer::{Store, Universal, Singlepass}; + +let engine = Universal::new(Singlepass::default()).engine(); +let store = Store::new(&engine); +```"# +); + #[cfg(feature = "singlepass")] pub use wasmer_compiler_singlepass::Singlepass; +#[cfg(feature = "cranelift")] +pub use wasmer_compiler_cranelift::{Cranelift, CraneliftOptLevel}; + +#[cfg(feature = "llvm")] +pub use wasmer_compiler_llvm::{LLVMOptLevel, LLVM}; + #[cfg(feature = "universal")] pub use wasmer_engine_universal::{Universal, UniversalArtifact, UniversalEngine}; diff --git a/lib/api/src/sys/module.rs b/lib/api/src/sys/module.rs index 07fe5565fc..42c739147f 100644 --- a/lib/api/src/sys/module.rs +++ b/lib/api/src/sys/module.rs @@ -2,6 +2,7 @@ use crate::sys::store::Store; use crate::sys::InstantiationError; use std::fmt; use std::io; +use std::path::Path; use std::sync::Arc; use thiserror::Error; use wasmer_compiler::CompileError; @@ -97,7 +98,6 @@ impl Module { /// # } /// ``` #[allow(unreachable_code)] - #[tracing::instrument(skip_all)] pub fn new(store: &Store, bytes: impl AsRef<[u8]>) -> Result<Self, CompileError> { #[cfg(feature = "wat")] let bytes = wat::parse_bytes(bytes.as_ref()).map_err(|e| { @@ -110,31 +110,74 @@ Self::from_binary(store, bytes.as_ref()) } + /// Creates a new WebAssembly module from a file path.
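+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch; the `.wasm` path here is assumed to exist:
+    ///
+    /// ```no_run
+    /// # use wasmer::{Store, Module};
+    /// # let store = Store::default();
+    /// let module = Module::from_file(&store, "path/to/module.wasm").unwrap();
+    /// ```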
+ pub fn from_file(store: &Store, file: impl AsRef<Path>) -> Result<Self, IoCompileError> { + let file_ref = file.as_ref(); + let wasm_bytes = std::fs::read(file_ref)?; + let module = Self::new(store, &wasm_bytes)?; + Ok(module) + } + /// Creates a new WebAssembly module from a binary. /// /// Opposed to [`Module::new`], this function is not compatible with /// the WebAssembly text format (if the "wat" feature is enabled for /// this crate). - #[tracing::instrument(skip_all)] - pub(crate) fn from_binary(store: &Store, binary: &[u8]) -> Result<Self, CompileError> { - store.engine().validate(binary)?; - let module = { - let executable = store.engine().compile(binary, store.tunables())?; - let artifact = store.engine().load(&*executable)?; - match artifact.downcast_arc::<UniversalArtifact>() { - Ok(universal) => Self { - store: store.clone(), - artifact: universal, - }, - // We're are probably given an externally defined artifact type - // which I imagine we don't care about for now since this entire crate - // is only used for tests and this crate only defines universal engine. - Err(_) => panic!("unhandled artifact type"), - } - }; + pub fn from_binary(store: &Store, binary: &[u8]) -> Result<Self, CompileError> { + Self::validate(store, binary)?; + unsafe { Self::from_binary_unchecked(store, binary) } + } + + /// Creates a new WebAssembly module skipping any kind of validation. + /// + /// # Safety + /// + /// This can speed up compilation time a bit, but it should be only used + /// in environments where the WebAssembly modules are trusted and validated + /// beforehand. + pub unsafe fn from_binary_unchecked( + store: &Store, + binary: &[u8], + ) -> Result<Self, CompileError> { + let module = Self::compile(store, binary)?; Ok(module) } + /// Validates a new WebAssembly Module given the configuration + /// in the Store. + /// + /// This validation is normally pretty fast and checks the enabled + /// WebAssembly features in the Store Engine to assure deterministic + /// validation of the Module. + pub fn validate(store: &Store, binary: &[u8]) -> Result<(), CompileError> { + store.engine().validate(binary) + } + + fn compile(store: &Store, binary: &[u8]) -> Result<Self, CompileError> { + let executable = store.engine().compile(binary, store.tunables())?; + let artifact = store.engine().load(&*executable)?; + match artifact.downcast_arc::<UniversalArtifact>() { + Ok(universal) => Ok(Self::from_universal_artifact(store, universal)), + // We're probably given an externally defined artifact type + // which I imagine we don't care about for now since this entire crate + // is only used for tests and this crate only defines universal engine. + Err(_) => panic!("unhandled artifact type"), + } + } + + /// Make a `Module` from a precompiled `UniversalArtifact`. + pub fn from_universal_artifact( + store: &Store, + artifact: Arc<UniversalArtifact>, + ) -> Self { + Self { + store: store.clone(), + artifact, + } + } + pub(crate) fn instantiate( &self, resolver: &dyn Resolver, diff --git a/lib/api/src/sys/native.rs b/lib/api/src/sys/native.rs index 181dddf181..baac369e9c 100644 --- a/lib/api/src/sys/native.rs +++ b/lib/api/src/sys/native.rs @@ -55,6 +55,18 @@ where pub(crate) fn arg_kind(&self) -> VMFunctionKind { self.exported.vm_function.kind } + + /// Get access to the backing VM value for this extern. This function is for + /// tests; it should not be called by users of the Wasmer API.
+ /// + /// # Safety + /// This function is unsafe to call outside of tests for the wasmer crate + /// because there is no stability guarantee for the returned type and we may + /// make breaking changes to it at any time or remove this method. + #[doc(hidden)] + pub unsafe fn get_vm_function(&self) -> &wasmer_vm::VMFunction { + &self.exported.vm_function + } } /* @@ -209,6 +221,22 @@ macro_rules! impl_native_traits { } } + + #[allow(unused_parens)] + impl<'a, $( $x, )* Rets> crate::sys::exports::ExportableWithGenerics<'a, ($( $x ),*), Rets> for NativeFunc<( $( $x ),* ), Rets> + where + $( $x: FromToNativeWasmType, )* + Rets: WasmTypeList, + { + fn get_self_from_extern_with_generics(_extern: crate::sys::externals::Extern) -> Result<Self, crate::sys::exports::ExportError> { + use crate::sys::exports::Exportable; + crate::Function::get_self_from_extern(_extern)?.native().map_err(|_| crate::sys::exports::ExportError::IncompatibleType) + } + + fn into_weak_instance_ref(&mut self) { + self.exported.vm_function.instance_ref.as_mut().map(|v| *v = v.downgrade()); + } + } }; } diff --git a/lib/api/src/sys/ptr.rs b/lib/api/src/sys/ptr.rs index 6706153244..33be4844f9 100644 --- a/lib/api/src/sys/ptr.rs +++ b/lib/api/src/sys/ptr.rs @@ -8,7 +8,7 @@ use crate::sys::cell::WasmCell; use crate::sys::{externals::Memory, FromToNativeWasmType}; -use std::{cell::Cell, marker::PhantomData, mem}; +use std::{cell::Cell, fmt, marker::PhantomData, mem}; use wasmer_types::ValueType; /// The `Array` marker type. This type can be used like `WasmPtr<T, Array>`. pub struct Array; pub struct Item; /// A zero-cost type that represents a pointer to something in Wasm linear /// memory. +/// +/// This type can be used directly in the host function arguments: +/// ``` +/// # use wasmer::Memory; +/// # use wasmer::WasmPtr; +/// pub fn host_import(memory: Memory, ptr: WasmPtr<u32>) { +/// let derefed_ptr = ptr.deref(&memory).expect("pointer in bounds"); +/// let inner_val: u32 = derefed_ptr.get(); +/// println!("Got {} from Wasm memory address 0x{:X}", inner_val, ptr.offset()); +/// // update the value being pointed to +/// derefed_ptr.set(inner_val + 1); +/// } +/// ``` +/// +/// This type can also be used with primitive-filled structs, but be careful of +/// guarantees required by `ValueType`. +/// ``` +/// # use wasmer::Memory; +/// # use wasmer::WasmPtr; +/// # use wasmer::ValueType; +/// +/// #[derive(Copy, Clone, Debug)] +/// #[repr(C)] +/// struct V3 { +/// x: f32, +/// y: f32, +/// z: f32 +/// } +/// // This is safe as the 12 bytes represented by this struct +/// // are valid for all bit combinations. +/// unsafe impl ValueType for V3 { +/// } +/// +/// fn update_vector_3(memory: Memory, ptr: WasmPtr<V3>) { +/// let derefed_ptr = ptr.deref(&memory).expect("pointer in bounds"); +/// let mut inner_val: V3 = derefed_ptr.get(); +/// println!("Got {:?} from Wasm memory address 0x{:X}", inner_val, ptr.offset()); +/// // update the value being pointed to +/// inner_val.x = 10.4; +/// derefed_ptr.set(inner_val); +/// } +/// ``` #[repr(transparent)] pub struct WasmPtr<T: Copy, Ty = Item> { offset: u32, @@ -51,6 +93,33 @@ fn align_pointer(ptr: usize, align: usize) -> usize { ptr & !(align - 1) } +/// Methods for `WasmPtr`s to data that can be dereferenced, namely to types +/// that implement [`ValueType`], meaning that they're valid for all possible +/// bit patterns. +impl<T: Copy + ValueType> WasmPtr<T, Item> { + /// Dereference the `WasmPtr` getting access to a `&Cell` allowing for + /// reading and mutating of the inner value. + /// + /// This method is unsound if used with unsynchronized shared memory.
+ /// If you're unsure what that means, it likely does not apply to you. + /// This invariant will be enforced in the future. + #[inline] + pub fn deref<'a>(self, memory: &'a Memory) -> Option<WasmCell<'a, T>> { + if (self.offset as usize) + mem::size_of::<T>() > memory.size().bytes().0 + || mem::size_of::<T>() == 0 + { + return None; + } + unsafe { + let cell_ptr = align_pointer( + memory.view::<u8>().as_ptr().add(self.offset as usize) as usize, + mem::align_of::<T>(), + ) as *const Cell<T>; + Some(WasmCell::new(&*cell_ptr)) + } + } +} + /// Methods for `WasmPtr`s to arrays of data that can be dereferenced, namely to /// types that implement [`ValueType`], meaning that they're valid for all /// possible bit patterns. @@ -95,6 +164,36 @@ impl<T: Copy + ValueType> WasmPtr<T, Array> { Some(wasm_cells) } + /// Get a UTF-8 string from the `WasmPtr` with the given length. + /// + /// Note that this method returns a reference to Wasm linear memory. The + /// underlying data can be mutated if the Wasm is allowed to execute or + /// an aliasing `WasmPtr` is used to mutate memory. + /// + /// # Safety + /// This method returns a reference to Wasm linear memory. The underlying + /// data can be mutated if the Wasm is allowed to execute or an aliasing + /// `WasmPtr` is used to mutate memory. + /// + /// `str` has invariants that must not be broken by mutating Wasm memory. + /// Thus the caller must ensure that the backing memory is not modified + /// while the reference is held. + /// + /// Additionally, if `memory` is dynamic, the caller must also ensure that `memory` + /// is not grown while the reference is held. + pub unsafe fn get_utf8_str<'a>(self, memory: &'a Memory, str_len: u32) -> Option<&'a str> { + let memory_size = memory.size().bytes().0; + + if self.offset as usize + str_len as usize > memory.size().bytes().0 + || self.offset as usize >= memory_size + { + return None; + } + let ptr = memory.view::<u8>().as_ptr().add(self.offset as usize) as *const u8; + let slice: &[u8] = std::slice::from_raw_parts(ptr, str_len as usize); + std::str::from_utf8(slice).ok() + } + /// Get a UTF-8 `String` from the `WasmPtr` with the given length. /// /// an aliasing `WasmPtr` is used to mutate memory. @@ -119,6 +218,30 @@ String::from_utf8(vec).ok() } + + /// Get a UTF-8 string from the `WasmPtr`, where the string is nul-terminated. + /// + /// Note that this does not account for UTF-8 strings that _contain_ nul themselves, + /// [`WasmPtr::get_utf8_str`] has to be used for those. + /// + /// # Safety + /// This method behaves similarly to [`WasmPtr::get_utf8_str`], all safety invariants on + /// that method must also be upheld here. + pub unsafe fn get_utf8_str_with_nul<'a>(self, memory: &'a Memory) -> Option<&'a str> { + memory.view::<u8>()[(self.offset as usize)..] + .iter() + .map(|cell| cell.get()) + .position(|byte| byte == 0) + .and_then(|length| self.get_utf8_str(memory, length as u32)) + } + + /// Get a UTF-8 `String` from the `WasmPtr`, where the string is nul-terminated. + /// + /// Note that this does not account for UTF-8 strings that _contain_ nul themselves, + /// [`WasmPtr::get_utf8_string`] has to be used for those.
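+    ///
+    /// An illustrative sketch (assumes `memory` holds the bytes `b"hi\0"`
+    /// starting at offset 0):
+    ///
+    /// ```ignore
+    /// let ptr: WasmPtr<u8, Array> = WasmPtr::new(0);
+    /// assert_eq!(ptr.get_utf8_string_with_nul(&memory).as_deref(), Some("hi"));
+    /// ```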
+ pub fn get_utf8_string_with_nul(self, memory: &Memory) -> Option<String> { + unsafe { self.get_utf8_str_with_nul(memory) }.map(|s| s.to_owned()) + } } unsafe impl<T: Copy, Ty> FromToNativeWasmType for WasmPtr<T, Ty> { @@ -147,3 +270,102 @@ impl<T: Copy, Ty> Clone for WasmPtr<T, Ty> { } impl<T: Copy, Ty> Copy for WasmPtr<T, Ty> {} + +impl<T: Copy, Ty> PartialEq for WasmPtr<T, Ty> { + fn eq(&self, other: &Self) -> bool { + self.offset == other.offset + } +} + +impl<T: Copy, Ty> Eq for WasmPtr<T, Ty> {} + +impl<T: Copy, Ty> fmt::Debug for WasmPtr<T, Ty> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "WasmPtr(offset: {}, pointer: {:#x}, align: {})", + self.offset, + self.offset, + mem::align_of::<T>() + ) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::sys::{Memory, MemoryType, Store}; + + /// Ensure that memory accesses work on the edges of memory and that out of + /// bounds errors are caught with `deref` + #[test] + fn wasm_ptr_memory_bounds_checks_hold() { + // create a memory + let store = Store::default(); + let memory_descriptor = MemoryType::new(1, Some(1), false); + let memory = Memory::new(&store, memory_descriptor).unwrap(); + + // test that basic access works and that len = 0 works, but oob does not + let start_wasm_ptr: WasmPtr<u8> = WasmPtr::new(0); + let start_wasm_ptr_array: WasmPtr<u8, Array> = WasmPtr::new(0); + + assert!(start_wasm_ptr.deref(&memory).is_some()); + assert!(start_wasm_ptr_array.deref(&memory, 0, 0).is_some()); + assert!(unsafe { start_wasm_ptr_array.get_utf8_str(&memory, 0).is_some() }); + assert!(start_wasm_ptr_array.get_utf8_string(&memory, 0).is_some()); + assert!(start_wasm_ptr_array.deref(&memory, 0, 1).is_some()); + + // test that accessing the last valid memory address works correctly and OOB is caught + let last_valid_address_for_u8 = (memory.size().bytes().0 - 1) as u32; + let end_wasm_ptr: WasmPtr<u8> = WasmPtr::new(last_valid_address_for_u8); + assert!(end_wasm_ptr.deref(&memory).is_some()); + + let end_wasm_ptr_array: WasmPtr<u8, Array> = WasmPtr::new(last_valid_address_for_u8); + + assert!(end_wasm_ptr_array.deref(&memory, 0, 1).is_some()); + let invalid_idx_len_combos: [(u32, u32); 3] = + [(last_valid_address_for_u8 + 1, 0), (0, 2), (1, 1)]; + for &(idx, len) in invalid_idx_len_combos.iter() { + assert!(end_wasm_ptr_array.deref(&memory, idx, len).is_none()); + } + assert!(unsafe { end_wasm_ptr_array.get_utf8_str(&memory, 2).is_none() }); + assert!(end_wasm_ptr_array.get_utf8_string(&memory, 2).is_none()); + + // test that accessing the last valid memory address for a u32 is valid + // (same as above test but with more edge cases to assert on) + let last_valid_address_for_u32 = (memory.size().bytes().0 - 4) as u32; + let end_wasm_ptr: WasmPtr<u32> = WasmPtr::new(last_valid_address_for_u32); + assert!(end_wasm_ptr.deref(&memory).is_some()); + assert!(end_wasm_ptr.deref(&memory).is_some()); + + let end_wasm_ptr_oob_array: [WasmPtr<u32>; 4] = [ + WasmPtr::new(last_valid_address_for_u32 + 1), + WasmPtr::new(last_valid_address_for_u32 + 2), + WasmPtr::new(last_valid_address_for_u32 + 3), + WasmPtr::new(last_valid_address_for_u32 + 4), + ]; + for oob_end_ptr in end_wasm_ptr_oob_array.iter() { + assert!(oob_end_ptr.deref(&memory).is_none()); + } + let end_wasm_ptr_array: WasmPtr<u32, Array> = WasmPtr::new(last_valid_address_for_u32); + assert!(end_wasm_ptr_array.deref(&memory, 0, 1).is_some()); + + let invalid_idx_len_combos: [(u32, u32); 3] = + [(last_valid_address_for_u32 + 1, 0), (0, 2), (1, 1)]; + for &(idx, len) in invalid_idx_len_combos.iter() { + assert!(end_wasm_ptr_array.deref(&memory, idx, len).is_none()); + } + + let end_wasm_ptr_array_oob_array: [WasmPtr<u32, Array>; 4] = [
WasmPtr::new(last_valid_address_for_u32 + 1), + WasmPtr::new(last_valid_address_for_u32 + 2), + WasmPtr::new(last_valid_address_for_u32 + 3), + WasmPtr::new(last_valid_address_for_u32 + 4), + ]; + + for oob_end_array_ptr in end_wasm_ptr_array_oob_array.iter() { + assert!(oob_end_array_ptr.deref(&memory, 0, 1).is_none()); + assert!(oob_end_array_ptr.deref(&memory, 1, 0).is_none()); + } + } +} diff --git a/lib/api/src/sys/store.rs b/lib/api/src/sys/store.rs index 36f175818b..ccc09a266d 100644 --- a/lib/api/src/sys/store.rs +++ b/lib/api/src/sys/store.rs @@ -79,7 +79,11 @@ impl Default for Store { #[allow(unreachable_code)] fn get_config() -> impl CompilerConfig + 'static { cfg_if::cfg_if! { - if #[cfg(feature = "default-singlepass")] { + if #[cfg(feature = "default-cranelift")] { + wasmer_compiler_cranelift::Cranelift::default() + } else if #[cfg(feature = "default-llvm")] { + wasmer_compiler_llvm::LLVM::default() + } else if #[cfg(feature = "default-singlepass")] { wasmer_compiler_singlepass::Singlepass::default() } else { compile_error!("No default compiler chosen") diff --git a/lib/api/src/sys/tunables.rs b/lib/api/src/sys/tunables.rs index 942e371578..58d9137ee9 100644 --- a/lib/api/src/sys/tunables.rs +++ b/lib/api/src/sys/tunables.rs @@ -27,9 +27,6 @@ pub struct BaseTunables { /// The size in bytes of the offset guard for dynamic heaps. pub dynamic_memory_offset_guard_size: u64, - - /// The cost of a regular op. - pub regular_op_cost: u64, } impl BaseTunables { @@ -64,15 +61,8 @@ impl BaseTunables { static_memory_bound, static_memory_offset_guard_size, dynamic_memory_offset_guard_size, - regular_op_cost: 0, } } - - /// Set the regular op cost for this compiler - pub fn set_regular_op_cost(&mut self, cost: u64) -> &mut Self { - self.regular_op_cost = cost; - self - } } impl Tunables for BaseTunables { @@ -154,81 +144,6 @@ impl Tunables for BaseTunables { vm_definition_location, )?)) } - - fn stack_init_gas_cost(&self, stack_size: u64) -> u64 { - (self.regular_op_cost / 8).saturating_mul(stack_size) - } - - /// Instrumentation configuration: stack limiter config - fn stack_limiter_cfg(&self) -> Box { - Box::new(SimpleMaxStackCfg) - } - - /// Instrumentation configuration: gas accounting config - fn gas_cfg(&self) -> Box> { - Box::new(SimpleGasCostCfg(self.regular_op_cost)) - } -} - -struct SimpleMaxStackCfg; - -impl finite_wasm::max_stack::SizeConfig for SimpleMaxStackCfg { - fn size_of_value(&self, ty: finite_wasm::wasmparser::ValType) -> u8 { - use finite_wasm::wasmparser::ValType; - match ty { - ValType::I32 => 4, - ValType::I64 => 8, - ValType::F32 => 4, - ValType::F64 => 8, - ValType::V128 => 16, - ValType::FuncRef => 8, - ValType::ExternRef => 8, - } - } - fn size_of_function_activation( - &self, - locals: &prefix_sum_vec::PrefixSumVec, - ) -> u64 { - let mut res = 0; - res += locals - .max_index() - .map(|l| u64::from(*l).saturating_add(1)) - .unwrap_or(0) - * 8; - // TODO: make the above take into account the types of locals by adding an iter on PrefixSumVec that returns (count, type) - res += 32; // Rough accounting for rip, rbp and some registers spilled. Not exact. - res - } -} - -struct SimpleGasCostCfg(u64); - -macro_rules! gas_cost { - ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident)*) => { - $( - fn $visit(&mut self $($(, $arg: $argty)*)?) -> u64 { - gas_cost!(@@$proposal $op self $({ $($arg: $argty),* })? => $visit) - } - )* - }; - - (@@mvp $_op:ident $_self:ident $({ $($_arg:ident: $_argty:ty),* })? 
=> visit_block) => {
-        0
-    };
-    (@@mvp $_op:ident $_self:ident $({ $($_arg:ident: $_argty:ty),* })? => visit_end) => {
-        0
-    };
-    (@@mvp $_op:ident $_self:ident $({ $($_arg:ident: $_argty:ty),* })? => visit_else) => {
-        0
-    };
-    (@@$_proposal:ident $_op:ident $self:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => {
-        $self.0
-    };
-}
-
-impl<'a> finite_wasm::wasmparser::VisitOperator<'a> for SimpleGasCostCfg {
-    type Output = u64;
-    finite_wasm::wasmparser::for_each_operator!(gas_cost);
 }
 
 #[cfg(test)]
@@ -241,7 +156,6 @@ mod tests {
         static_memory_bound: Pages(2048),
         static_memory_offset_guard_size: 128,
         dynamic_memory_offset_guard_size: 256,
-        regular_op_cost: 0,
     };
 
     // No maximum
diff --git a/lib/api/src/sys/types.rs b/lib/api/src/sys/types.rs
index bad2fd2868..57549a7879 100644
--- a/lib/api/src/sys/types.rs
+++ b/lib/api/src/sys/types.rs
@@ -39,11 +39,11 @@ impl From<Function> for Val {
 pub trait ValFuncRef {
     fn into_vm_funcref(&self, store: &Store) -> Result<VMFuncRef, RuntimeError>;
 
-    unsafe fn from_vm_funcref(item: VMFuncRef, store: &Store) -> Self;
+    fn from_vm_funcref(item: VMFuncRef, store: &Store) -> Self;
 
     fn into_table_reference(&self, store: &Store) -> Result<wasmer_vm::TableElement, RuntimeError>;
 
-    unsafe fn from_table_reference(item: wasmer_vm::TableElement, store: &Store) -> Self;
+    fn from_table_reference(item: wasmer_vm::TableElement, store: &Store) -> Self;
 }
 
 impl ValFuncRef for Val {
@@ -58,11 +58,32 @@ impl ValFuncRef for Val {
         })
     }
 
-    /// # Safety
-    ///
-    /// The returned `Val` must outlive the containing instance.
-    unsafe fn from_vm_funcref(func_ref: VMFuncRef, store: &Store) -> Self {
-        Self::FuncRef(Function::from_vm_funcref(store, func_ref))
+    fn from_vm_funcref(func_ref: VMFuncRef, store: &Store) -> Self {
+        if func_ref.is_null() {
+            return Self::FuncRef(None);
+        }
+        let item: &wasmer_vm::VMCallerCheckedAnyfunc = unsafe {
+            let anyfunc: *const wasmer_vm::VMCallerCheckedAnyfunc = *func_ref;
+            &*anyfunc
+        };
+        let export = wasmer_vm::ExportFunction {
+            // TODO:
+            // figure out if we ever need a value here: need testing with complicated import patterns
+            metadata: None,
+            vm_function: wasmer_vm::VMFunction {
+                address: item.func_ptr,
+                signature: item.type_index,
+                // TODO: review this comment (unclear if it's still correct):
+                // All functions in tables are already Static (as dynamic functions
+                // are converted to use the trampolines with static signatures).
+                kind: wasmer_vm::VMFunctionKind::Static,
+                vmctx: item.vmctx,
+                call_trampoline: None,
+                instance_ref: None,
+            },
+        };
+        let f = Function::from_vm_export(store, export);
+        Self::FuncRef(Some(f))
     }
 
     fn into_table_reference(&self, store: &Store) -> Result<wasmer_vm::TableElement, RuntimeError> {
@@ -80,10 +101,7 @@ impl ValFuncRef for Val {
         })
     }
 
-    /// # Safety
-    ///
-    /// The returned `Val` may not outlive the containing instance.
-    unsafe fn from_table_reference(item: wasmer_vm::TableElement, store: &Store) -> Self {
+    fn from_table_reference(item: wasmer_vm::TableElement, store: &Store) -> Self {
         match item {
             wasmer_vm::TableElement::FuncRef(f) => Self::from_vm_funcref(f, store),
             wasmer_vm::TableElement::ExternRef(extern_ref) => Self::ExternRef(extern_ref.into()),
diff --git a/lib/api/src/sys/utils.rs b/lib/api/src/sys/utils.rs
new file mode 100644
index 0000000000..b3f067f333
--- /dev/null
+++ b/lib/api/src/sys/utils.rs
@@ -0,0 +1,4 @@
+/// Check if the provided bytes are wasm-like
+pub fn is_wasm(bytes: impl AsRef<[u8]>) -> bool {
+    bytes.as_ref().starts_with(b"\0asm")
+}
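The new `is_wasm` helper checks only the four-byte `\0asm` magic, so it is a cheap pre-filter rather than validation. A small usage sketch, assuming the helper is re-exported at the crate root like the rest of the public API (the re-export itself is not shown in this diff):

```rust
use wasmer::is_wasm; // assumed re-export of sys::utils::is_wasm

fn main() {
    // A real binary module starts with the magic number plus a version field.
    assert!(is_wasm(b"\0asm\x01\x00\x00\x00"));
    // WAT text fails the magic-number check even though Module::new would
    // accept it when the `wat` feature is enabled.
    assert!(!is_wasm(b"(module)"));
}
```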
diff --git a/lib/api/tests/sys_export.rs b/lib/api/tests/sys_export.rs
new file mode 100644
index 0000000000..fd950fe56e
--- /dev/null
+++ b/lib/api/tests/sys_export.rs
@@ -0,0 +1,366 @@
+#[cfg(feature = "sys")]
+mod sys {
+    use anyhow::Result;
+    use wasmer::*;
+    use wasmer_vm::{VMGlobal, VMMemory, VMTable, WeakOrStrongInstanceRef};
+
+    const MEM_WAT: &str = "
+    (module
+        (func $host_fn (import \"env\" \"host_fn\") (param) (result))
+        (func (export \"call_host_fn\") (param) (result)
+            (call $host_fn))
+
+        (memory $mem 0)
+        (export \"memory\" (memory $mem))
+    )
+";
+
+    const GLOBAL_WAT: &str = "
+    (module
+        (func $host_fn (import \"env\" \"host_fn\") (param) (result))
+        (func (export \"call_host_fn\") (param) (result)
+            (call $host_fn))
+
+        (global $global i32 (i32.const 11))
+        (export \"global\" (global $global))
+    )
+";
+
+    const TABLE_WAT: &str = "
+    (module
+        (func $host_fn (import \"env\" \"host_fn\") (param) (result))
+        (func (export \"call_host_fn\") (param) (result)
+            (call $host_fn))
+
+        (table $table 4 4 funcref)
+        (export \"table\" (table $table))
+    )
+";
+
+    const FUNCTION_WAT: &str = "
+    (module
+        (func $host_fn (import \"env\" \"host_fn\") (param) (result))
+        (func (export \"call_host_fn\") (param) (result)
+            (call $host_fn))
+    )
+";
+
+    fn is_memory_instance_ref_strong(memory: &VMMemory) -> Option<bool> {
+        // This is safe because we're calling it from a test to test the internals
+        memory
+            .instance_ref
+            .as_ref()
+            .map(|v| matches!(v, WeakOrStrongInstanceRef::Strong(_)))
+    }
+
+    fn is_table_instance_ref_strong(table: &VMTable) -> Option<bool> {
+        // This is safe because we're calling it from a test to test the internals
+        table
+            .instance_ref
+            .as_ref()
+            .map(|v| matches!(v, WeakOrStrongInstanceRef::Strong(_)))
+    }
+
+    fn is_global_instance_ref_strong(global: &VMGlobal) -> Option<bool> {
+        // This is safe because we're calling it from a test to test the internals
+        global
+            .instance_ref
+            .as_ref()
+            .map(|v| matches!(v, WeakOrStrongInstanceRef::Strong(_)))
+    }
+
+    fn is_function_instance_ref_strong(f: &Function) -> Option<bool> {
+        // This is safe because we're calling it from a test to test the internals
+        unsafe {
+            f.get_vm_function()
+                .instance_ref
+                .as_ref()
+                .map(|v| matches!(v, WeakOrStrongInstanceRef::Strong(_)))
+        }
+    }
+
+    fn is_native_function_instance_ref_strong<Args, Rets>(
+        f: &NativeFunc<Args, Rets>,
+    ) -> Option<bool>
+    where
+        Args: WasmTypeList,
+        Rets: WasmTypeList,
+    {
+        // This is safe because we're calling it from a test to test the internals
+        unsafe {
+            f.get_vm_function()
+                .instance_ref
+                .as_ref()
+                .map(|v| matches!(v, WeakOrStrongInstanceRef::Strong(_)))
+        }
+    }
+
+    #[test]
+    fn strong_weak_behavior_works_memory() -> Result<()> {
+        #[derive(Clone, Debug, WasmerEnv, Default)]
+        struct MemEnv {
+            #[wasmer(export)]
+            memory: LazyInit<Memory>,
+        }
+
+        let host_fn = |env: &MemEnv| unsafe {
+            let mem = env.memory_ref().unwrap();
+            assert_eq!(
is_memory_instance_ref_strong(&mem.get_vm_memory()), + Some(false) + ); + let mem_clone = mem.clone(); + assert_eq!( + is_memory_instance_ref_strong(&mem_clone.get_vm_memory()), + Some(true) + ); + assert_eq!( + is_memory_instance_ref_strong(&mem.get_vm_memory()), + Some(false) + ); + }; + + let f: NativeFunc<(), ()> = { + let store = Store::default(); + let module = Module::new(&store, MEM_WAT)?; + let env = MemEnv::default(); + + let instance = Instance::new( + &module, + &imports! { + "env" => { + "host_fn" => Function::new_native_with_env(&store, env, host_fn) + } + }, + )?; + + { + if let Some(Export::Memory(mem)) = instance.lookup("memory") { + assert_eq!(is_memory_instance_ref_strong(&mem), Some(true)); + } else { + panic!("not a memory"); + } + } + + let f: NativeFunc<(), ()> = instance.get_native_function("call_host_fn").unwrap(); + f.call()?; + f + }; + f.call()?; + + Ok(()) + } + + #[test] + fn strong_weak_behavior_works_global() -> Result<()> { + #[derive(Clone, Debug, WasmerEnv, Default)] + struct GlobalEnv { + #[wasmer(export)] + global: LazyInit, + } + + let host_fn = |env: &GlobalEnv| unsafe { + let global = env.global_ref().unwrap(); + assert_eq!( + is_global_instance_ref_strong(&global.get_vm_global()), + Some(false) + ); + let global_clone = global.clone(); + assert_eq!( + is_global_instance_ref_strong(&global_clone.get_vm_global()), + Some(true) + ); + assert_eq!( + is_global_instance_ref_strong(&global.get_vm_global()), + Some(false) + ); + }; + + let f: NativeFunc<(), ()> = { + let store = Store::default(); + let module = Module::new(&store, GLOBAL_WAT)?; + let env = GlobalEnv::default(); + + let instance = Instance::new( + &module, + &imports! { + "env" => { + "host_fn" => Function::new_native_with_env(&store, env, host_fn) + } + }, + )?; + + { + if let Some(Export::Global(global)) = instance.lookup("global") { + assert_eq!(is_global_instance_ref_strong(&global), Some(true)); + } else { + panic!("not a global"); + } + } + + let f: NativeFunc<(), ()> = instance.get_native_function("call_host_fn").unwrap(); + f.call()?; + f + }; + f.call()?; + + Ok(()) + } + + #[test] + fn strong_weak_behavior_works_table() -> Result<()> { + #[derive(Clone, WasmerEnv, Default)] + struct TableEnv { + #[wasmer(export)] + table: LazyInit
, + } + + let host_fn = |env: &TableEnv| unsafe { + let table = env.table_ref().unwrap(); + assert_eq!( + is_table_instance_ref_strong(&table.get_vm_table()), + Some(false) + ); + let table_clone = table.clone(); + assert_eq!( + is_table_instance_ref_strong(&table_clone.get_vm_table()), + Some(true) + ); + assert_eq!( + is_table_instance_ref_strong(&table.get_vm_table()), + Some(false) + ); + }; + + let f: NativeFunc<(), ()> = { + let store = Store::default(); + let module = Module::new(&store, TABLE_WAT)?; + let env = TableEnv::default(); + + let instance = Instance::new( + &module, + &imports! { + "env" => { + "host_fn" => Function::new_native_with_env(&store, env, host_fn) + } + }, + )?; + + { + if let Some(Export::Table(table)) = instance.lookup("table") { + assert_eq!(is_table_instance_ref_strong(&table), Some(true)); + } else { + panic!("not a table"); + } + } + + let f: NativeFunc<(), ()> = instance.get_native_function("call_host_fn").unwrap(); + f.call()?; + f + }; + f.call()?; + + Ok(()) + } + + #[test] + fn strong_weak_behavior_works_function() -> Result<()> { + #[derive(Clone, WasmerEnv, Default)] + struct FunctionEnv { + #[wasmer(export)] + call_host_fn: LazyInit, + } + + let host_fn = |env: &FunctionEnv| { + let function = env.call_host_fn_ref().unwrap(); + assert_eq!(is_function_instance_ref_strong(&function), Some(false)); + let function_clone = function.clone(); + assert_eq!(is_function_instance_ref_strong(&function_clone), Some(true)); + assert_eq!(is_function_instance_ref_strong(&function), Some(false)); + }; + + let f: NativeFunc<(), ()> = { + let store = Store::default(); + let module = Module::new(&store, FUNCTION_WAT)?; + let env = FunctionEnv::default(); + + let instance = Instance::new( + &module, + &imports! { + "env" => { + "host_fn" => Function::new_native_with_env(&store, env, host_fn) + } + }, + )?; + + { + let function = instance.lookup_function("call_host_fn").unwrap(); + assert_eq!(is_function_instance_ref_strong(&function), Some(true)); + } + + let f: NativeFunc<(), ()> = instance.get_native_function("call_host_fn").unwrap(); + f.call()?; + f + }; + f.call()?; + + Ok(()) + } + + #[test] + fn strong_weak_behavior_works_native_function() -> Result<()> { + #[derive(Clone, WasmerEnv, Default)] + struct FunctionEnv { + #[wasmer(export)] + call_host_fn: LazyInit>, + } + + let host_fn = |env: &FunctionEnv| { + let function = env.call_host_fn_ref().unwrap(); + assert_eq!( + is_native_function_instance_ref_strong(&function), + Some(false) + ); + let function_clone = function.clone(); + assert_eq!( + is_native_function_instance_ref_strong(&function_clone), + Some(true) + ); + assert_eq!( + is_native_function_instance_ref_strong(&function), + Some(false) + ); + }; + + let f: NativeFunc<(), ()> = { + let store = Store::default(); + let module = Module::new(&store, FUNCTION_WAT)?; + let env = FunctionEnv::default(); + + let instance = Instance::new( + &module, + &imports! 
{ + "env" => { + "host_fn" => Function::new_native_with_env(&store, env, host_fn) + } + }, + )?; + + { + let function: NativeFunc<(), ()> = + instance.get_native_function("call_host_fn").unwrap(); + assert_eq!( + is_native_function_instance_ref_strong(&function), + Some(true) + ); + } + + let f: NativeFunc<(), ()> = instance.get_native_function("call_host_fn").unwrap(); + f.call()?; + f + }; + f.call()?; + + Ok(()) + } +} diff --git a/lib/api/tests/sys_externals.rs b/lib/api/tests/sys_externals.rs new file mode 100644 index 0000000000..6b9bb53bce --- /dev/null +++ b/lib/api/tests/sys_externals.rs @@ -0,0 +1,466 @@ +#[cfg(feature = "sys")] +mod sys { + use anyhow::Result; + use wasmer::*; + + #[test] + fn global_new() -> Result<()> { + let store = Store::default(); + let global = Global::new(&store, Value::I32(10)); + assert_eq!( + *global.ty(), + GlobalType { + ty: Type::I32, + mutability: Mutability::Const + } + ); + + let global_mut = Global::new_mut(&store, Value::I32(10)); + assert_eq!( + *global_mut.ty(), + GlobalType { + ty: Type::I32, + mutability: Mutability::Var + } + ); + + Ok(()) + } + + #[test] + fn global_get() -> Result<()> { + let store = Store::default(); + let global_i32 = Global::new(&store, Value::I32(10)); + assert_eq!(global_i32.get(), Value::I32(10)); + let global_i64 = Global::new(&store, Value::I64(20)); + assert_eq!(global_i64.get(), Value::I64(20)); + let global_f32 = Global::new(&store, Value::F32(10.0)); + assert_eq!(global_f32.get(), Value::F32(10.0)); + let global_f64 = Global::new(&store, Value::F64(20.0)); + assert_eq!(global_f64.get(), Value::F64(20.0)); + + Ok(()) + } + + #[test] + fn global_set() -> Result<()> { + let store = Store::default(); + let global_i32 = Global::new(&store, Value::I32(10)); + // Set on a constant should error + assert!(global_i32.set(Value::I32(20)).is_err()); + + let global_i32_mut = Global::new_mut(&store, Value::I32(10)); + // Set on different type should error + assert!(global_i32_mut.set(Value::I64(20)).is_err()); + + // Set on same type should succeed + global_i32_mut.set(Value::I32(20))?; + assert_eq!(global_i32_mut.get(), Value::I32(20)); + + Ok(()) + } + + #[test] + fn table_new() -> Result<()> { + let store = Store::default(); + let table_type = TableType { + ty: Type::FuncRef, + minimum: 0, + maximum: None, + }; + let f = Function::new_native(&store, || {}); + let table = Table::new(&store, table_type, Value::FuncRef(Some(f)))?; + assert_eq!(*table.ty(), table_type); + + // Anyrefs not yet supported + // let table_type = TableType { + // ty: Type::ExternRef, + // minimum: 0, + // maximum: None, + // }; + // let table = Table::new(&store, table_type, Value::ExternRef(ExternRef::Null))?; + // assert_eq!(*table.ty(), table_type); + + Ok(()) + } + + #[test] + #[ignore] + fn table_get() -> Result<()> { + let store = Store::default(); + let table_type = TableType { + ty: Type::FuncRef, + minimum: 0, + maximum: Some(1), + }; + let f = Function::new_native(&store, |num: i32| num + 1); + let table = Table::new(&store, table_type, Value::FuncRef(Some(f.clone())))?; + assert_eq!(*table.ty(), table_type); + let _elem = table.get(0).unwrap(); + // assert_eq!(elem.funcref().unwrap(), f); + Ok(()) + } + + #[test] + #[ignore] + fn table_set() -> Result<()> { + // Table set not yet tested + Ok(()) + } + + #[test] + fn table_grow() -> Result<()> { + let store = Store::default(); + let table_type = TableType { + ty: Type::FuncRef, + minimum: 0, + maximum: Some(10), + }; + let f = Function::new_native(&store, |num: i32| num + 1); + let 
table = Table::new(&store, table_type, Value::FuncRef(Some(f.clone())))?; + // Growing to a bigger maximum should return None + let old_len = table.grow(12, Value::FuncRef(Some(f.clone()))); + assert!(old_len.is_err()); + + // Growing to a bigger maximum should return None + let old_len = table.grow(5, Value::FuncRef(Some(f.clone())))?; + assert_eq!(old_len, 0); + + Ok(()) + } + + #[test] + #[ignore] + fn table_copy() -> Result<()> { + // TODO: table copy test not yet implemented + Ok(()) + } + + #[test] + fn memory_new() -> Result<()> { + let store = Store::default(); + let memory_type = MemoryType { + shared: false, + minimum: Pages(0), + maximum: Some(Pages(10)), + }; + let memory = Memory::new(&store, memory_type)?; + assert_eq!(memory.size(), Pages(0)); + assert_eq!(memory.ty(), memory_type); + Ok(()) + } + + #[test] + fn memory_grow() -> Result<()> { + let store = Store::default(); + + let desc = MemoryType::new(Pages(10), Some(Pages(16)), false); + let memory = Memory::new(&store, desc)?; + assert_eq!(memory.size(), Pages(10)); + + let result = memory.grow(Pages(2)).unwrap(); + assert_eq!(result, Pages(10)); + assert_eq!(memory.size(), Pages(12)); + + let result = memory.grow(Pages(10)); + assert_eq!( + result, + Err(MemoryError::CouldNotGrow { + current: 12.into(), + attempted_delta: 10.into() + }) + ); + + let bad_desc = MemoryType::new(Pages(15), Some(Pages(10)), false); + let bad_result = Memory::new(&store, bad_desc); + + assert!(matches!(bad_result, Err(MemoryError::InvalidMemory { .. }))); + + Ok(()) + } + + #[test] + fn function_new() -> Result<()> { + let store = Store::default(); + let function = Function::new_native(&store, || {}); + assert_eq!(function.ty().clone(), FunctionType::new(vec![], vec![])); + let function = Function::new_native(&store, |_a: i32| {}); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![Type::I32], vec![]) + ); + let function = Function::new_native(&store, |_a: i32, _b: i64, _c: f32, _d: f64| {}); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![Type::I32, Type::I64, Type::F32, Type::F64], vec![]) + ); + let function = Function::new_native(&store, || -> i32 { 1 }); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![], vec![Type::I32]) + ); + let function = + Function::new_native(&store, || -> (i32, i64, f32, f64) { (1, 2, 3.0, 4.0) }); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![], vec![Type::I32, Type::I64, Type::F32, Type::F64]) + ); + Ok(()) + } + + #[test] + fn function_new_env() -> Result<()> { + let store = Store::default(); + #[derive(Clone, WasmerEnv)] + struct MyEnv {} + + let my_env = MyEnv {}; + let function = Function::new_native_with_env(&store, my_env.clone(), |_env: &MyEnv| {}); + assert_eq!(function.ty().clone(), FunctionType::new(vec![], vec![])); + let function = + Function::new_native_with_env(&store, my_env.clone(), |_env: &MyEnv, _a: i32| {}); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![Type::I32], vec![]) + ); + let function = Function::new_native_with_env( + &store, + my_env.clone(), + |_env: &MyEnv, _a: i32, _b: i64, _c: f32, _d: f64| {}, + ); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![Type::I32, Type::I64, Type::F32, Type::F64], vec![]) + ); + let function = + Function::new_native_with_env(&store, my_env.clone(), |_env: &MyEnv| -> i32 { 1 }); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![], vec![Type::I32]) + ); + let function = Function::new_native_with_env( + &store, + my_env.clone(), + |_env: &MyEnv| 
-> (i32, i64, f32, f64) { (1, 2, 3.0, 4.0) }, + ); + assert_eq!( + function.ty().clone(), + FunctionType::new(vec![], vec![Type::I32, Type::I64, Type::F32, Type::F64]) + ); + Ok(()) + } + + #[test] + fn function_new_dynamic() -> Result<()> { + let store = Store::default(); + + // Using &FunctionType signature + let function_type = FunctionType::new(vec![], vec![]); + let function = Function::new(&store, &function_type, |_values: &[Value]| unimplemented!()); + assert_eq!(function.ty().clone(), function_type); + let function_type = FunctionType::new(vec![Type::I32], vec![]); + let function = Function::new(&store, &function_type, |_values: &[Value]| unimplemented!()); + assert_eq!(function.ty().clone(), function_type); + let function_type = + FunctionType::new(vec![Type::I32, Type::I64, Type::F32, Type::F64], vec![]); + let function = Function::new(&store, &function_type, |_values: &[Value]| unimplemented!()); + assert_eq!(function.ty().clone(), function_type); + let function_type = FunctionType::new(vec![], vec![Type::I32]); + let function = Function::new(&store, &function_type, |_values: &[Value]| unimplemented!()); + assert_eq!(function.ty().clone(), function_type); + let function_type = + FunctionType::new(vec![], vec![Type::I32, Type::I64, Type::F32, Type::F64]); + let function = Function::new(&store, &function_type, |_values: &[Value]| unimplemented!()); + assert_eq!(function.ty().clone(), function_type); + + // Using array signature + let function_type = ([Type::V128], [Type::I32, Type::F32, Type::F64]); + let function = Function::new(&store, function_type, |_values: &[Value]| unimplemented!()); + assert_eq!(function.ty().params(), [Type::V128]); + assert_eq!(function.ty().results(), [Type::I32, Type::F32, Type::F64]); + + Ok(()) + } + + #[test] + fn function_new_dynamic_env() -> Result<()> { + let store = Store::default(); + #[derive(Clone, WasmerEnv)] + struct MyEnv {} + let my_env = MyEnv {}; + + // Using &FunctionType signature + let function_type = FunctionType::new(vec![], vec![]); + let function = Function::new_with_env( + &store, + &function_type, + my_env.clone(), + |_env: &MyEnv, _values: &[Value]| unimplemented!(), + ); + assert_eq!(function.ty().clone(), function_type); + let function_type = FunctionType::new(vec![Type::I32], vec![]); + let function = Function::new_with_env( + &store, + &function_type, + my_env.clone(), + |_env: &MyEnv, _values: &[Value]| unimplemented!(), + ); + assert_eq!(function.ty().clone(), function_type); + let function_type = + FunctionType::new(vec![Type::I32, Type::I64, Type::F32, Type::F64], vec![]); + let function = Function::new_with_env( + &store, + &function_type, + my_env.clone(), + |_env: &MyEnv, _values: &[Value]| unimplemented!(), + ); + assert_eq!(function.ty().clone(), function_type); + let function_type = FunctionType::new(vec![], vec![Type::I32]); + let function = Function::new_with_env( + &store, + &function_type, + my_env.clone(), + |_env: &MyEnv, _values: &[Value]| unimplemented!(), + ); + assert_eq!(function.ty().clone(), function_type); + let function_type = + FunctionType::new(vec![], vec![Type::I32, Type::I64, Type::F32, Type::F64]); + let function = Function::new_with_env( + &store, + &function_type, + my_env.clone(), + |_env: &MyEnv, _values: &[Value]| unimplemented!(), + ); + assert_eq!(function.ty().clone(), function_type); + + // Using array signature + let function_type = ([Type::V128], [Type::I32, Type::F32, Type::F64]); + let function = Function::new_with_env( + &store, + function_type, + my_env.clone(), + |_env: 
&MyEnv, _values: &[Value]| unimplemented!(), + ); + assert_eq!(function.ty().params(), [Type::V128]); + assert_eq!(function.ty().results(), [Type::I32, Type::F32, Type::F64]); + + Ok(()) + } + + #[test] + fn native_function_works() -> Result<()> { + let store = Store::default(); + let function = Function::new_native(&store, || {}); + let native_function: NativeFunc<(), ()> = function.native().unwrap(); + let result = native_function.call(); + assert!(result.is_ok()); + + let function = Function::new_native(&store, |a: i32| -> i32 { a + 1 }); + let native_function: NativeFunc = function.native().unwrap(); + assert_eq!(native_function.call(3).unwrap(), 4); + + fn rust_abi(a: i32, b: i64, c: f32, d: f64) -> u64 { + (a as u64 * 1000) + (b as u64 * 100) + (c as u64 * 10) + (d as u64) + } + let function = Function::new_native(&store, rust_abi); + let native_function: NativeFunc<(i32, i64, f32, f64), u64> = function.native().unwrap(); + assert_eq!(native_function.call(8, 4, 1.5, 5.).unwrap(), 8415); + + let function = Function::new_native(&store, || -> i32 { 1 }); + let native_function: NativeFunc<(), i32> = function.native().unwrap(); + assert_eq!(native_function.call().unwrap(), 1); + + let function = Function::new_native(&store, |_a: i32| {}); + let native_function: NativeFunc = function.native().unwrap(); + assert!(native_function.call(4).is_ok()); + + let function = + Function::new_native(&store, || -> (i32, i64, f32, f64) { (1, 2, 3.0, 4.0) }); + let native_function: NativeFunc<(), (i32, i64, f32, f64)> = function.native().unwrap(); + assert_eq!(native_function.call().unwrap(), (1, 2, 3.0, 4.0)); + + Ok(()) + } + + #[test] + fn function_outlives_instance() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (type $sum_t (func (param i32 i32) (result i32))) + (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32) + local.get $x + local.get $y + i32.add) + (export "sum" (func $sum_f))) +"#; + + let f = { + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! {})?; + let f: NativeFunc<(i32, i32), i32> = instance.get_native_function("sum").unwrap(); + + assert_eq!(f.call(4, 5)?, 9); + f + }; + + assert_eq!(f.call(4, 5)?, 9); + + Ok(()) + } + + #[test] + fn weak_instance_ref_externs_after_instance() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (memory (export "mem") 1) + (type $sum_t (func (param i32 i32) (result i32))) + (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32) + local.get $x + local.get $y + i32.add) + (export "sum" (func $sum_f))) +"#; + + let f = { + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! 
{})?; + let f: NativeFunc<(i32, i32), i32> = instance.get_native_function("sum").unwrap(); + assert_eq!(f.call(4, 5)?, 9); + f + }; + + assert_eq!(f.call(4, 5)?, 9); + + Ok(()) + } + + #[test] + fn manually_generate_wasmer_env() -> Result<()> { + let store = Store::default(); + #[derive(WasmerEnv, Clone)] + struct MyEnv { + val: u32, + memory: LazyInit, + } + + fn host_function(env: &mut MyEnv, arg1: u32, arg2: u32) -> u32 { + env.val + arg1 + arg2 + } + + let mut env = MyEnv { + val: 5, + memory: LazyInit::new(), + }; + + let result = host_function(&mut env, 7, 9); + assert_eq!(result, 21); + + let memory = Memory::new(&store, MemoryType::new(0, None, false))?; + env.memory.initialize(memory); + + let result = host_function(&mut env, 1, 2); + assert_eq!(result, 8); + + Ok(()) + } +} diff --git a/lib/api/tests/sys_instance.rs b/lib/api/tests/sys_instance.rs new file mode 100644 index 0000000000..7a94b7dffb --- /dev/null +++ b/lib/api/tests/sys_instance.rs @@ -0,0 +1,70 @@ +#[cfg(feature = "sys")] +mod sys { + use anyhow::Result; + use wasmer::*; + + #[test] + fn exports_work_after_multiple_instances_have_been_freed() -> Result<()> { + let store = Store::default(); + let module = Module::new( + &store, + " + (module + (type $sum_t (func (param i32 i32) (result i32))) + (func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32) + local.get $x + local.get $y + i32.add) + (export \"sum\" (func $sum_f))) +", + )?; + + let import_object = ImportObject::new(); + let instance = Instance::new(&module, &import_object)?; + let instance2 = instance.clone(); + let instance3 = instance.clone(); + + // The function is cloned to “break” the connection with `instance`. + let sum = instance.lookup_function("sum").unwrap().clone(); + + drop(instance); + drop(instance2); + drop(instance3); + + // All instances have been dropped, but `sum` continues to work! 
+ assert_eq!( + sum.call(&[Value::I32(1), Value::I32(2)])?.into_vec(), + vec![Value::I32(3)], + ); + + Ok(()) + } + + #[test] + fn unit_native_function_env() -> Result<()> { + let store = Store::default(); + #[derive(WasmerEnv, Clone)] + struct Env { + multiplier: u32, + } + + fn imported_fn(env: &Env, args: &[Val]) -> Result, RuntimeError> { + let value = env.multiplier * args[0].unwrap_i32() as u32; + return Ok(vec![Val::I32(value as _)]); + } + + let imported_signature = FunctionType::new(vec![Type::I32], vec![Type::I32]); + let imported = Function::new_with_env( + &store, + imported_signature, + Env { multiplier: 3 }, + imported_fn, + ); + + let expected = vec![Val::I32(12)].into_boxed_slice(); + let result = imported.call(&[Val::I32(4)])?; + assert_eq!(result, expected); + + Ok(()) + } +} diff --git a/lib/api/tests/sys_module.rs b/lib/api/tests/sys_module.rs new file mode 100644 index 0000000000..a34a0f1d11 --- /dev/null +++ b/lib/api/tests/sys_module.rs @@ -0,0 +1,95 @@ +#[cfg(feature = "sys")] +mod sys { + use anyhow::Result; + use wasmer::*; + + #[test] + fn calling_host_functions_with_negative_values_works() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (import "host" "host_func1" (func (param i64))) + (import "host" "host_func2" (func (param i32))) + (import "host" "host_func3" (func (param i64))) + (import "host" "host_func4" (func (param i32))) + (import "host" "host_func5" (func (param i32))) + (import "host" "host_func6" (func (param i32))) + (import "host" "host_func7" (func (param i32))) + (import "host" "host_func8" (func (param i32))) + + (func (export "call_host_func1") + (call 0 (i64.const -1))) + (func (export "call_host_func2") + (call 1 (i32.const -1))) + (func (export "call_host_func3") + (call 2 (i64.const -1))) + (func (export "call_host_func4") + (call 3 (i32.const -1))) + (func (export "call_host_func5") + (call 4 (i32.const -1))) + (func (export "call_host_func6") + (call 5 (i32.const -1))) + (func (export "call_host_func7") + (call 6 (i32.const -1))) + (func (export "call_host_func8") + (call 7 (i32.const -1))) +)"#; + let module = Module::new(&store, wat)?; + let imports = imports! 
{ + "host" => { + "host_func1" => Function::new_native(&store, |p: u64| { + println!("host_func1: Found number {}", p); + assert_eq!(p, u64::max_value()); + }), + "host_func2" => Function::new_native(&store, |p: u32| { + println!("host_func2: Found number {}", p); + assert_eq!(p, u32::max_value()); + }), + "host_func3" => Function::new_native(&store, |p: i64| { + println!("host_func3: Found number {}", p); + assert_eq!(p, -1); + }), + "host_func4" => Function::new_native(&store, |p: i32| { + println!("host_func4: Found number {}", p); + assert_eq!(p, -1); + }), + "host_func5" => Function::new_native(&store, |p: i16| { + println!("host_func5: Found number {}", p); + assert_eq!(p, -1); + }), + "host_func6" => Function::new_native(&store, |p: u16| { + println!("host_func6: Found number {}", p); + assert_eq!(p, u16::max_value()); + }), + "host_func7" => Function::new_native(&store, |p: i8| { + println!("host_func7: Found number {}", p); + assert_eq!(p, -1); + }), + "host_func8" => Function::new_native(&store, |p: u8| { + println!("host_func8: Found number {}", p); + assert_eq!(p, u8::max_value()); + }), + } + }; + let instance = Instance::new(&module, &imports)?; + + let f1: NativeFunc<(), ()> = instance.get_native_function("call_host_func1")?; + let f2: NativeFunc<(), ()> = instance.get_native_function("call_host_func2")?; + let f3: NativeFunc<(), ()> = instance.get_native_function("call_host_func3")?; + let f4: NativeFunc<(), ()> = instance.get_native_function("call_host_func4")?; + let f5: NativeFunc<(), ()> = instance.get_native_function("call_host_func5")?; + let f6: NativeFunc<(), ()> = instance.get_native_function("call_host_func6")?; + let f7: NativeFunc<(), ()> = instance.get_native_function("call_host_func7")?; + let f8: NativeFunc<(), ()> = instance.get_native_function("call_host_func8")?; + + f1.call()?; + f2.call()?; + f3.call()?; + f4.call()?; + f5.call()?; + f6.call()?; + f7.call()?; + f8.call()?; + + Ok(()) + } +} diff --git a/lib/api/tests/sys_reference_types.rs b/lib/api/tests/sys_reference_types.rs new file mode 100644 index 0000000000..f1cfb1a980 --- /dev/null +++ b/lib/api/tests/sys_reference_types.rs @@ -0,0 +1,565 @@ +#[cfg(feature = "sys")] +mod sys { + use anyhow::Result; + use std::collections::HashMap; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + use wasmer::*; + + #[test] + #[cfg_attr(feature = "singlepass", ignore)] // singlepass does not support funcref args. + fn func_ref_passed_and_returned() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (import "env" "func_ref_identity" (func (param funcref) (result funcref))) + (type $ret_i32_ty (func (result i32))) + (table $table (export "table") 2 2 funcref) + + (func (export "run") (param) (result funcref) + (call 0 (ref.null func))) + (func (export "call_set_value") (param $fr funcref) (result i32) + (table.set $table (i32.const 0) (local.get $fr)) + (call_indirect $table (type $ret_i32_ty) (i32.const 0))) +)"#; + let module = Module::new(&store, wat)?; + let func_ref_identity = Function::new( + &store, + FunctionType::new(vec![Type::FuncRef], vec![Type::FuncRef]), + |values| -> Result, _> { Ok(vec![values[0].clone()]) }, + ); + let imports = imports! 
{ + "env" => { + "func_ref_identity" => func_ref_identity + }, + }; + + let instance = Instance::new(&module, &imports)?; + + let f: Function = instance.lookup_function("run").unwrap(); + let results = f.call(&[]).unwrap(); + if let Value::FuncRef(fr) = &results[0] { + assert!(fr.is_none()); + } else { + panic!("funcref not found!"); + } + + #[derive(Clone, Debug, WasmerEnv)] + pub struct Env(Arc); + let env = Env(Arc::new(AtomicBool::new(false))); + + let func_to_call = Function::new_native_with_env(&store, env.clone(), |env: &Env| -> i32 { + env.0.store(true, Ordering::SeqCst); + 343 + }); + let call_set_value: Function = instance.lookup_function("call_set_value").unwrap(); + let results: Box<[Value]> = call_set_value.call(&[Value::FuncRef(Some(func_to_call))])?; + assert!(env.0.load(Ordering::SeqCst)); + assert_eq!(&*results, &[Value::I32(343)]); + + Ok(()) + } + + #[test] + #[cfg_attr(feature = "singlepass", ignore)] // singlepass does not support funcref args. + fn func_ref_passed_and_called() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (func $func_ref_call (import "env" "func_ref_call") (param funcref) (result i32)) + (type $ret_i32_ty (func (result i32))) + (table $table (export "table") 2 2 funcref) + + (func $product (param $x i32) (param $y i32) (result i32) + (i32.mul (local.get $x) (local.get $y))) + ;; TODO: figure out exactly why this statement is needed + (elem declare func $product) + (func (export "call_set_value") (param $fr funcref) (result i32) + (table.set $table (i32.const 0) (local.get $fr)) + (call_indirect $table (type $ret_i32_ty) (i32.const 0))) + (func (export "call_func") (param $fr funcref) (result i32) + (call $func_ref_call (local.get $fr))) + (func (export "call_host_func_with_wasm_func") (result i32) + (call $func_ref_call (ref.func $product))) +)"#; + let module = Module::new(&store, wat)?; + + fn func_ref_call(values: &[Value]) -> Result, RuntimeError> { + // TODO: look into `Box<[Value]>` being returned breakage + let f = values[0].unwrap_funcref().as_ref().unwrap(); + let f: NativeFunc<(i32, i32), i32> = f.native()?; + Ok(vec![Value::I32(f.call(7, 9)?)]) + } + + let func_ref_call = Function::new( + &store, + FunctionType::new(vec![Type::FuncRef], vec![Type::I32]), + func_ref_call, + ); + let imports = imports! { + "env" => { + "func_ref_call" => func_ref_call, + // TODO(reftypes): this should work + /* + "func_ref_call_native" => Function::new_native(&store, |f: Function| -> Result { + let f: NativeFunc::<(i32, i32), i32> = f.native()?; + f.call(7, 9) + }) + */ + }, + }; + + let instance = Instance::new(&module, &imports)?; + { + fn sum(a: i32, b: i32) -> i32 { + a + b + } + let sum_func = Function::new_native(&store, sum); + + let call_func: Function = instance.lookup_function("call_func").unwrap(); + let result = call_func.call(&[Value::FuncRef(Some(sum_func))])?; + assert_eq!(result[0].unwrap_i32(), 16); + } + + { + let f: NativeFunc<(), i32> = instance + .get_native_function("call_host_func_with_wasm_func") + .unwrap(); + let result = f.call()?; + assert_eq!(result, 63); + } + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + #[cfg_attr(feature = "singlepass", ignore)] // singlepass does not support funcref args. 
+ fn extern_ref_passed_and_returned() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (func $extern_ref_identity (import "env" "extern_ref_identity") (param externref) (result externref)) + (func $extern_ref_identity_native (import "env" "extern_ref_identity_native") (param externref) (result externref)) + (func $get_new_extern_ref (import "env" "get_new_extern_ref") (result externref)) + (func $get_new_extern_ref_native (import "env" "get_new_extern_ref_native") (result externref)) + + (func (export "run") (param) (result externref) + (call $extern_ref_identity (ref.null extern))) + (func (export "run_native") (param) (result externref) + (call $extern_ref_identity_native (ref.null extern))) + (func (export "get_hashmap") (param) (result externref) + (call $get_new_extern_ref)) + (func (export "get_hashmap_native") (param) (result externref) + (call $get_new_extern_ref_native)) +)"#; + let module = Module::new(&store, wat)?; + let extern_ref_identity = Function::new( + &store, + FunctionType::new(vec![Type::ExternRef], vec![Type::ExternRef]), + |values| -> Result, _> { Ok(vec![values[0].clone()]) }, + ); + let extern_ref_identity_native = + Function::new_native(&store, |er: ExternRef| -> ExternRef { er }); + let get_new_extern_ref = Function::new( + &store, + FunctionType::new(vec![], vec![Type::ExternRef]), + |_| -> Result, _> { + let inner = [ + ("hello".to_string(), "world".to_string()), + ("color".to_string(), "orange".to_string()), + ] + .iter() + .cloned() + .collect::>(); + let new_extern_ref = ExternRef::new(inner); + Ok(vec![Value::ExternRef(new_extern_ref)]) + }, + ); + let get_new_extern_ref_native = Function::new_native(&store, || -> ExternRef { + let inner = [ + ("hello".to_string(), "world".to_string()), + ("color".to_string(), "orange".to_string()), + ] + .iter() + .cloned() + .collect::>(); + ExternRef::new(inner) + }); + let imports = imports! 
{ + "env" => { + "extern_ref_identity" => extern_ref_identity, + "extern_ref_identity_native" => extern_ref_identity_native, + "get_new_extern_ref" => get_new_extern_ref, + "get_new_extern_ref_native" => get_new_extern_ref_native, + }, + }; + + let instance = Instance::new(&module, &imports)?; + for run in &["run", "run_native"] { + let f: Function = instance.lookup_function(run).unwrap(); + let results = f.call(&[]).unwrap(); + if let Value::ExternRef(er) = &results[0] { + assert!(er.is_null()); + } else { + panic!("result is not an extern ref!"); + } + + let f: NativeFunc<(), ExternRef> = instance.get_native_function(run).unwrap(); + let result: ExternRef = f.call()?; + assert!(result.is_null()); + } + + for get_hashmap in &["get_hashmap", "get_hashmap_native"] { + let f: Function = instance.lookup_function(get_hashmap).unwrap(); + let results = f.call(&[]).unwrap(); + if let Value::ExternRef(er) = &results[0] { + let inner: &HashMap = er.downcast().unwrap(); + assert_eq!(inner["hello"], "world"); + assert_eq!(inner["color"], "orange"); + } else { + panic!("result is not an extern ref!"); + } + + let f: NativeFunc<(), ExternRef> = instance.get_native_function(get_hashmap).unwrap(); + + let result: ExternRef = f.call()?; + let inner: &HashMap = result.downcast().unwrap(); + assert_eq!(inner["hello"], "world"); + assert_eq!(inner["color"], "orange"); + } + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + // TODO(reftypes): reenable this test + #[ignore] + fn extern_ref_ref_counting_basic() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (func (export "drop") (param $er externref) (result) + (drop (local.get $er))) +)"#; + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! {})?; + let f: NativeFunc = instance.get_native_function("drop").unwrap(); + + let er = ExternRef::new(3u32); + f.call(er.clone())?; + + assert_eq!(er.downcast::().unwrap(), &3); + assert_eq!(er.strong_count(), 1); + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + fn refs_in_globals() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (global $er_global (export "er_global") (mut externref) (ref.null extern)) + (global $fr_global (export "fr_global") (mut funcref) (ref.null func)) + (global $fr_immutable_global (export "fr_immutable_global") funcref (ref.func $hello)) + (func $hello (param) (result i32) + (i32.const 73)) +)"#; + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! 
{})?; + { + let er_global = if let Some(Export::Global(g)) = instance.lookup("er_global") { + g.from + } else { + panic!("no global"); + }; + + if let Value::ExternRef(er) = er_global.get(&store) { + assert!(er.is_null()); + } else { + panic!("Did not find extern ref in the global"); + } + + unsafe { + er_global.set(Val::ExternRef(ExternRef::new(3u32)))?; + } + + if let Value::ExternRef(er) = er_global.get(&store) { + assert_eq!(er.downcast::().unwrap(), &3); + assert_eq!(er.strong_count(), 1); + } else { + panic!("Did not find extern ref in the global"); + } + } + + { + let fr_global = if let Some(Export::Global(g)) = instance.lookup("fr_immutable_global") + { + g.from + } else { + panic!("no global"); + }; + + if let Value::FuncRef(Some(f)) = fr_global.get(&store) { + let native_func: NativeFunc<(), u32> = f.native()?; + assert_eq!(native_func.call()?, 73); + } else { + panic!("Did not find non-null func ref in the global"); + } + } + + { + let fr_global = if let Some(Export::Global(g)) = instance.lookup("fr_global") { + g.from + } else { + panic!("no global"); + }; + + if let Value::FuncRef(None) = fr_global.get(&store) { + } else { + panic!("Did not find a null func ref in the global"); + } + + let f = Function::new_native(&store, |arg1: i32, arg2: i32| -> i32 { arg1 + arg2 }); + + unsafe { + fr_global.set(Val::FuncRef(Some(f)))?; + } + + if let Value::FuncRef(Some(f)) = fr_global.get(&store) { + let native: NativeFunc<(i32, i32), i32> = f.native()?; + assert_eq!(native.call(5, 7)?, 12); + } else { + panic!("Did not find extern ref in the global"); + } + } + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + #[cfg_attr(feature = "singlepass", ignore)] // singlepass does not support funcref args. + fn extern_ref_ref_counting_table_basic() -> Result<()> { + use wasmer_vm::TableElement; + + let store = Store::default(); + let wat = r#"(module + (global $global (export "global") (mut externref) (ref.null extern)) + (table $table (export "table") 4 4 externref) + (func $insert (param $er externref) (param $idx i32) + (table.set $table (local.get $idx) (local.get $er))) + (func $intermediate (param $er externref) (param $idx i32) + (call $insert (local.get $er) (local.get $idx))) + (func $insert_into_table (export "insert_into_table") (param $er externref) (param $idx i32) (result externref) + (call $intermediate (local.get $er) (local.get $idx)) + (local.get $er)) +)"#; + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! 
{})?; + + let f: NativeFunc<(ExternRef, i32), ExternRef> = + instance.get_native_function("insert_into_table").unwrap(); + + let er = ExternRef::new(3usize); + + let er = f.call(er, 1)?; + assert_eq!(er.strong_count(), 2); + + let table = if let Some(Export::Table(t)) = instance.lookup("table") { + t.from + } else { + panic!("no table"); + }; + + if let TableElement::ExternRef(er2) = table.get(1).unwrap() { + assert_eq!(er2.strong_count(), 3); + } + + assert_eq!(er.strong_count(), 2); + table + .set(1, TableElement::ExternRef(ExternRef::null())) + .unwrap(); + + assert_eq!(er.strong_count(), 1); + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + // TODO(reftypes): reenable this test + #[ignore] + fn extern_ref_ref_counting_global_basic() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (global $global (export "global") (mut externref) (ref.null extern)) + (func $get_from_global (export "get_from_global") (result externref) + (drop (global.get $global)) + (global.get $global)) +)"#; + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! {})?; + + let global = if let Some(Export::Global(g)) = instance.lookup("global") { + g.from + } else { + panic!("not a global"); + }; + { + let er = ExternRef::new(3usize); + unsafe { + global.set(Val::ExternRef(er.clone()))?; + } + assert_eq!(er.strong_count(), 2); + } + let get_from_global: NativeFunc<(), ExternRef> = + instance.get_native_function("get_from_global").unwrap(); + + let er = get_from_global.call()?; + assert_eq!(er.strong_count(), 2); + unsafe { + global.set(Val::ExternRef(ExternRef::null()))?; + } + assert_eq!(er.strong_count(), 1); + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + // TODO(reftypes): reenable this test + #[ignore] + fn extern_ref_ref_counting_traps() -> Result<()> { + let store = Store::default(); + let wat = r#"(module + (func $pass_er (export "pass_extern_ref") (param externref) + (local.get 0) + (unreachable)) +)"#; + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! {})?; + + let pass_extern_ref: NativeFunc = + instance.get_native_function("pass_extern_ref").unwrap(); + + let er = ExternRef::new(3usize); + assert_eq!(er.strong_count(), 1); + + let result = pass_extern_ref.call(er.clone()); + assert!(result.is_err()); + assert_eq!(er.strong_count(), 1); + + Ok(()) + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + #[test] + #[cfg_attr(feature = "singlepass", ignore)] // singlepass does not support funcref args. + fn extern_ref_ref_counting_table_instructions() -> Result<()> { + use wasmer_vm::TableElement; + + let store = Store::default(); + let wat = r#"(module + (table $table1 (export "table1") 2 12 externref) + (table $table2 (export "table2") 6 12 externref) + (func $grow_table_with_ref (export "grow_table_with_ref") (param $er externref) (param $size i32) (result i32) + (table.grow $table1 (local.get $er) (local.get $size))) + (func $fill_table_with_ref (export "fill_table_with_ref") (param $er externref) (param $start i32) (param $end i32) + (table.fill $table1 (local.get $start) (local.get $er) (local.get $end))) + (func $copy_into_table2 (export "copy_into_table2") + (table.copy $table2 $table1 (i32.const 0) (i32.const 0) (i32.const 4))) +)"#; + let module = Module::new(&store, wat)?; + let instance = Instance::new(&module, &imports! 
{})?; + + let grow_table_with_ref: NativeFunc<(ExternRef, i32), i32> = + instance.get_native_function("grow_table_with_ref").unwrap(); + let fill_table_with_ref: NativeFunc<(ExternRef, i32, i32), ()> = + instance.get_native_function("fill_table_with_ref").unwrap(); + let copy_into_table2: NativeFunc<(), ()> = + instance.get_native_function("copy_into_table2").unwrap(); + let (table1, table2) = if let (Some(Export::Table(t1)), Some(Export::Table(t2))) = + (instance.lookup("table1"), instance.lookup("table2")) + { + (t1.from, t2.from) + } else { + panic!("can't get tables"); + }; + + let er1 = ExternRef::new(3usize); + let er2 = ExternRef::new(5usize); + let er3 = ExternRef::new(7usize); + { + let result = grow_table_with_ref.call(er1.clone(), 0)?; + assert_eq!(result, 2); + assert_eq!(er1.strong_count(), 1); + + let result = grow_table_with_ref.call(er1.clone(), 10_000)?; + assert_eq!(result, -1); + assert_eq!(er1.strong_count(), 1); + + let result = grow_table_with_ref.call(er1.clone(), 8)?; + assert_eq!(result, 2); + assert_eq!(er1.strong_count(), 9); + + for i in 2..10 { + if let TableElement::ExternRef(e) = table1.get(i).unwrap() { + assert_eq!(*e.downcast::().unwrap(), 3); + assert_eq!(&e, &er1); + } + } + assert_eq!(er1.strong_count(), 9); + } + + { + fill_table_with_ref.call(er2.clone(), 0, 2)?; + assert_eq!(er2.strong_count(), 3); + } + + { + table2.set(0, TableElement::ExternRef(er3.clone())).unwrap(); + table2.set(1, TableElement::ExternRef(er3.clone())).unwrap(); + table2.set(2, TableElement::ExternRef(er3.clone())).unwrap(); + table2.set(3, TableElement::ExternRef(er3.clone())).unwrap(); + table2.set(4, TableElement::ExternRef(er3.clone())).unwrap(); + assert_eq!(er3.strong_count(), 6); + } + + { + copy_into_table2.call()?; + assert_eq!(er3.strong_count(), 2); + assert_eq!(er2.strong_count(), 5); + assert_eq!(er1.strong_count(), 11); + for i in 1..5 { + if let TableElement::ExternRef(e) = table2.get(i).unwrap() { + let value = e.downcast::().unwrap(); + match i { + 0 | 1 => assert_eq!(*value, 5), + 4 => assert_eq!(*value, 7), + _ => assert_eq!(*value, 3), + } + } else { + panic!("not extern ref"); + } + } + } + + { + for i in 0..table1.size() { + table1 + .set(i, TableElement::ExternRef(ExternRef::null())) + .unwrap(); + } + for i in 0..table2.size() { + table2 + .set(i, TableElement::ExternRef(ExternRef::null())) + .unwrap(); + } + } + + assert_eq!(er1.strong_count(), 1); + assert_eq!(er2.strong_count(), 1); + assert_eq!(er3.strong_count(), 1); + + Ok(()) + } +} diff --git a/lib/cli/Cargo.toml b/lib/cli/Cargo.toml new file mode 100644 index 0000000000..4b6a707285 --- /dev/null +++ b/lib/cli/Cargo.toml @@ -0,0 +1,88 @@ +[package] +name = "wasmer-cli" +version = "2.1.0" +description = "Wasmer CLI" +categories = ["wasm", "command-line-interface"] +keywords = ["wasm", "webassembly", "cli"] +authors = ["Wasmer Engineering Team "] +repository = "https://github.com/wasmerio/wasmer" +license = "MIT" +readme = "README.md" +edition = "2018" +default-run = "wasmer" +build = "build.rs" + +[[bin]] +name = "wasmer" +path = "src/bin/wasmer.rs" +doc = false + +[[bin]] +name = "wasmer-headless" +path = "src/bin/wasmer_headless.rs" +doc = false +required-features = ["headless"] + +[dependencies] +wasmer = { version = "2.0.3", path = "../api", package = "wasmer-near", default-features = false } +wasmer-compiler = { version = "2.0.3", path = "../compiler", package = "wasmer-compiler-near" } +wasmer-compiler-cranelift = { version = "2.0.0", path = "../compiler-cranelift", optional = true } 
+wasmer-compiler-singlepass = { version = "2.0.3", path = "../compiler-singlepass", package = "wasmer-compiler-singlepass-near", optional = true }
+wasmer-compiler-llvm = { version = "2.0.0", path = "../compiler-llvm", optional = true }
+wasmer-engine = { version = "2.0.3", path = "../engine", package = "wasmer-engine-near" }
+wasmer-engine-universal = { version = "2.0.3", path = "../engine-universal", package = "wasmer-engine-universal-near", optional = true }
+wasmer-vm = { version = "2.0.3", path = "../vm", package = "wasmer-vm-near" }
+wasmer-wast = { version = "2.0.0", path = "../../tests/lib/wast", optional = true }
+wasmer-types = { version = "2.0.3", path = "../types", package = "wasmer-types-near" }
+atty = "0.2"
+colored = "2.0"
+anyhow = "1.0"
+structopt = { version = "0.3", features = ["suggestions"] }
+# For the function names autosuggestion
+distance = "0.4"
+# For the inspect subcommand
+bytesize = "1.0"
+cfg-if = "1.0"
+# For debug feature
+fern = { version = "0.6", features = ["colored"], optional = true }
+log = { version = "0.4", optional = true }
+tempfile = "3"
+
+[features]
+# Don't add the compiler features in default, please add them on the Makefile
+# since we might want to autoconfigure them depending on the availability on the host.
+default = [
+    "wat",
+    "wast",
+    "universal",
+]
+engine = []
+universal = [
+    "wasmer-engine-universal",
+    "engine",
+]
+wast = ["wasmer-wast"]
+wat = ["wasmer/wat"]
+compiler = [
+    "wasmer-compiler/translator",
+    "wasmer-engine-universal/compiler",
+]
+singlepass = [
+    "wasmer-compiler-singlepass",
+    "compiler",
+]
+cranelift = [
+    "wasmer-compiler-cranelift",
+    "compiler",
+]
+llvm = [
+    "wasmer-compiler-llvm",
+    "compiler",
+]
+debug = ["fern", "log"]
+disable-all-logging = []
+headless = []
+headless-minimal = ["headless", "disable-all-logging", "universal"]
+
+# Deprecated features.
+jit = ["universal"]
diff --git a/lib/cli/README.md b/lib/cli/README.md
new file mode 100644
index 0000000000..e02ddf67bd
--- /dev/null
+++ b/lib/cli/README.md
@@ -0,0 +1,72 @@
+# `wasmer-cli` [![Build Status](https://github.com/wasmerio/wasmer/workflows/build/badge.svg?style=flat-square)](https://github.com/wasmerio/wasmer/actions?query=workflow%3Abuild) [![Join Wasmer Slack](https://img.shields.io/static/v1?label=Slack&message=join%20chat&color=brightgreen&style=flat-square)](https://slack.wasmer.io) [![MIT License](https://img.shields.io/github/license/wasmerio/wasmer.svg?style=flat-square)](https://github.com/wasmerio/wasmer/blob/master/LICENSE)
+
+This crate is the Wasmer CLI.
+
+The recommended way to install `wasmer` is via the [wasmer-installer](https://github.com/wasmerio/wasmer-install).
+
+However, you can also install `wasmer` via Cargo (you will need to specify the compilers to use):
+
+```bash
+cargo install wasmer-cli --features "singlepass,cranelift"
+```
+
+Or by building it inside the codebase:
+
+```bash
+cargo build --release --features "singlepass,cranelift"
+```
+
+> Note: installing `wasmer` via Cargo (or manual install) will not install
+> the WAPM cli. If you want to use them together, please use the [wasmer installer](https://github.com/wasmerio/wasmer-install).
+
+
+## Features
+
+The Wasmer CLI supports the following features:
+* `wat` (default): support for executing WebAssembly text files.
+* `wast` (default): support for running wast test files.
+* `universal` (default): support for the [Universal engine].
+* `dylib` (default): support for the [Dylib engine].
+* `cache` (default): support for automatically caching compiled artifacts.
+* `wasi` (default): support for [WASI].
+* `experimental-io-devices`: support for experimental IO devices in WASI.
+* `emscripten` (default): support for [Emscripten].
+* `singlepass`: support for the [Singlepass compiler].
+* `cranelift`: support for the [Cranelift compiler].
+* `llvm`: support for the [LLVM compiler].
+
+[Universal engine]: https://github.com/wasmerio/wasmer/tree/master/lib/engine-universal/
+[Dylib engine]: https://github.com/wasmerio/wasmer/tree/master/lib/engine-dylib/
+[WASI]: https://github.com/wasmerio/wasmer/tree/master/lib/wasi/
+[Emscripten]: https://github.com/wasmerio/wasmer/tree/master/lib/emscripten/
+[Singlepass compiler]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-singlepass/
+[Cranelift compiler]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-cranelift/
+[LLVM compiler]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-llvm/
+
+## CLI commands
+
+Once you have Wasmer installed, you can start executing WebAssembly files easily:
+
+Get the current Wasmer version:
+
+```bash
+wasmer -V
+```
+
+Execute a WebAssembly file:
+
+```bash
+wasmer run myfile.wasm
+```
+
+Compile a WebAssembly file:
+
+```bash
+wasmer compile myfile.wasm -o myfile.so --dylib
+```
+
+Run a compiled WebAssembly file (fastest):
+
+```bash
+wasmer run myfile.so
+```
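These CLI compiler features line up with the `Store::default()` selection order changed in `lib/api/src/sys/store.rs` above. For an embedder, the explicit equivalent looks roughly like the sketch below, assuming the `cranelift` compiler and universal engine crates from this workspace are enabled; treat it as illustrative rather than the exact code path.

```rust
use wasmer::Store;
use wasmer_compiler_cranelift::Cranelift;
use wasmer_engine_universal::Universal;

// Explicit form of what Store::default() resolves to when the
// default-cranelift feature is enabled: compiler config -> engine -> store.
fn make_store() -> Store {
    let engine = Universal::new(Cranelift::default()).engine();
    Store::new(&engine)
}
```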
diff --git a/lib/cli/build.rs b/lib/cli/build.rs
new file mode 100644
index 0000000000..36fe64763e
--- /dev/null
+++ b/lib/cli/build.rs
@@ -0,0 +1,4 @@
+pub fn main() {
+    println!("cargo:rerun-if-changed=build.rs");
+    println!("cargo:rerun-if-env-changed=WASMER_INSTALL_PREFIX");
+}
diff --git a/lib/cli/src/bin/wasmer.rs b/lib/cli/src/bin/wasmer.rs
new file mode 100644
index 0000000000..8f781e1f8c
--- /dev/null
+++ b/lib/cli/src/bin/wasmer.rs
@@ -0,0 +1,10 @@
+use wasmer_cli::cli::wasmer_main;
+
+#[cfg(not(any(feature = "cranelift", feature = "singlepass", feature = "llvm")))]
+compile_error!(
+    "Either enable at least one compiler, or compile the wasmer-headless binary instead"
+);
+
+fn main() {
+    wasmer_main();
+}
diff --git a/lib/cli/src/bin/wasmer_headless.rs b/lib/cli/src/bin/wasmer_headless.rs
new file mode 100644
index 0000000000..ccd9aa9bf8
--- /dev/null
+++ b/lib/cli/src/bin/wasmer_headless.rs
@@ -0,0 +1,5 @@
+use wasmer_cli::cli::wasmer_main;
+
+fn main() {
+    wasmer_main();
+}
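The next file introduces a small AST for emitting C source (`CType` and `CStatement`), used by the staticlib-header generator. As a preview, here is a hedged usage sketch; the `wasmer_cli::c_gen` import path and the `WASMER_METADATA` name are assumptions for illustration, not something this diff shows.

```rust
use wasmer_cli::c_gen::{generate_c, CStatement, CType};

fn main() {
    // Emits "const extern unsigned char WASMER_METADATA[];\n" -- the kind of
    // byte-array declaration the staticlib header generator produces.
    let decl = CStatement::Declaration {
        name: "WASMER_METADATA".to_string(),
        is_extern: true,
        is_const: true,
        ctype: CType::Array {
            inner: Box::new(CType::U8),
        },
        definition: None,
    };
    print!("{}", generate_c(&[decl]));
}
```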
+    Function {
+        /// The arguments the function takes.
+        arguments: Vec<CType>,
+        /// The return value if it has one.
+        ///
+        /// `None` is equivalent to `Some(Box::new(CType::Void))`.
+        return_value: Option<Box<CType>>,
+    },
+    /// C constant array.
+    Array {
+        /// The type of the array.
+        inner: Box<CType>,
+    },
+    /// A user defined type.
+    TypeDef(String),
+}
+
+impl CType {
+    /// Convenience function to get a mutable void pointer type.
+    pub fn void_ptr() -> Self {
+        CType::PointerTo {
+            is_const: false,
+            inner: Box::new(CType::Void),
+        }
+    }
+
+    /// Convenience function to get a const void pointer type.
+    #[allow(dead_code)]
+    pub fn const_void_ptr() -> Self {
+        CType::PointerTo {
+            is_const: true,
+            inner: Box::new(CType::Void),
+        }
+    }
+
+    /// Generate the C source code for a type into the given `String`.
+    fn generate_c(&self, w: &mut String) {
+        match &self {
+            Self::Void => {
+                w.push_str("void");
+            }
+            Self::PointerTo { is_const, inner } => {
+                if *is_const {
+                    w.push_str("const ");
+                }
+                inner.generate_c(w);
+                w.push('*');
+            }
+            Self::U8 => {
+                w.push_str("unsigned char");
+            }
+            Self::U16 => {
+                w.push_str("unsigned short");
+            }
+            Self::U32 => {
+                w.push_str("unsigned int");
+            }
+            Self::U64 => {
+                w.push_str("unsigned long long");
+            }
+            Self::USize => {
+                w.push_str("unsigned size_t");
+            }
+            Self::I8 => {
+                w.push_str("char");
+            }
+            Self::I16 => {
+                w.push_str("short");
+            }
+            Self::I32 => {
+                w.push_str("int");
+            }
+            Self::I64 => {
+                w.push_str("long long");
+            }
+            Self::ISize => {
+                w.push_str("size_t");
+            }
+            Self::Function {
+                arguments,
+                return_value,
+            } => {
+                // A function with no name; assume it's a function pointer.
+                let ret: CType = return_value
+                    .as_ref()
+                    .map(|i: &Box<CType>| (&**i).clone())
+                    .unwrap_or_default();
+                ret.generate_c(w);
+                w.push(' ');
+                w.push_str("(*)");
+                w.push('(');
+                if arguments.len() > 1 {
+                    for arg in &arguments[..arguments.len() - 1] {
+                        arg.generate_c(w);
+                        w.push_str(", ");
+                    }
+                    arguments.last().unwrap().generate_c(w);
+                } else if arguments.len() == 1 {
+                    arguments[0].generate_c(w);
+                }
+                w.push(')');
+            }
+            Self::Array { inner } => {
+                inner.generate_c(w);
+                w.push_str("[]");
+            }
+            Self::TypeDef(inner) => {
+                w.push_str(&inner);
+            }
+        }
+    }
+
+    /// Generate the C source code for a type with a name into the given `String`.
+    fn generate_c_with_name(&self, name: &str, w: &mut String) {
+        match &self {
+            Self::PointerTo { .. }
+            | Self::TypeDef { .. }
+            | Self::Void
+            | Self::U8
+            | Self::U16
+            | Self::U32
+            | Self::U64
+            | Self::USize
+            | Self::I8
+            | Self::I16
+            | Self::I32
+            | Self::I64
+            | Self::ISize => {
+                self.generate_c(w);
+                w.push(' ');
+                w.push_str(name);
+            }
+            Self::Function {
+                arguments,
+                return_value,
+            } => {
+                let ret: CType = return_value
+                    .as_ref()
+                    .map(|i: &Box<CType>| (&**i).clone())
+                    .unwrap_or_default();
+                ret.generate_c(w);
+                w.push(' ');
+                w.push_str(&name);
+                w.push('(');
+                if arguments.len() > 1 {
+                    for arg in &arguments[..arguments.len() - 1] {
+                        arg.generate_c(w);
+                        w.push_str(", ");
+                    }
+                    arguments.last().unwrap().generate_c(w);
+                } else if arguments.len() == 1 {
+                    arguments[0].generate_c(w);
+                }
+                w.push(')');
+            }
+            Self::Array { inner } => {
+                inner.generate_c(w);
+                w.push(' ');
+                w.push_str(&name);
+                w.push_str("[]");
+            }
+        }
+    }
+}
+
+impl Default for CType {
+    fn default() -> CType {
+        CType::Void
+    }
+}
+
+/// A statement in the C programming language. This may not map exactly onto
+/// a C AST or what the C standard says about the C language; it's simply a
+/// structured way to organize data for generating C code.
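+///
+/// For example, a `const` `Declaration` of an `I32` `Array` with a
+/// `LiteralArray` definition renders as `const int my_array[] = {...};`
+/// (see the tests at the bottom of this module).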
+#[derive(Debug, Clone)]
+pub enum CStatement {
+    /// A declaration of some kind.
+    Declaration {
+        /// The name of the thing being declared.
+        name: CIdent,
+        /// Whether the thing being declared is `extern`.
+        is_extern: bool,
+        /// Whether the thing being declared is `const`.
+        is_const: bool,
+        /// The type of the thing being declared.
+        ctype: CType,
+        /// The definition of the thing being declared.
+        ///
+        /// This is useful for initializing constant arrays, for example.
+        definition: Option<Box<CStatement>>,
+    },
+
+    /// A literal array of CStatements.
+    LiteralArray {
+        /// The contents of the array.
+        items: Vec<CStatement>,
+    },
+
+    /// A literal constant value, passed through directly as a string.
+    LiteralConstant {
+        /// The raw value acting as a constant.
+        value: String,
+    },
+
+    /// A C-style cast.
+    Cast {
+        /// The type to cast to.
+        target_type: CType,
+        /// The thing being cast.
+        expression: Box<CStatement>,
+    },
+
+    /// Typedef one type to another.
+    TypeDef {
+        /// The type of the thing being typedef'd.
+        source_type: CType,
+        /// The new name by which this type may be called.
+        new_name: CIdent,
+    },
+}
+
+impl CStatement {
+    /// Generate C source code for the given CStatement.
+    fn generate_c(&self, w: &mut String) {
+        match &self {
+            Self::Declaration {
+                name,
+                is_extern,
+                is_const,
+                ctype,
+                definition,
+            } => {
+                if *is_const {
+                    w.push_str("const ");
+                }
+                if *is_extern {
+                    w.push_str("extern ");
+                }
+                ctype.generate_c_with_name(name, w);
+                if let Some(def) = definition {
+                    w.push_str(" = ");
+                    def.generate_c(w);
+                }
+                w.push(';');
+                w.push('\n');
+            }
+            Self::LiteralArray { items } => {
+                w.push('{');
+                if !items.is_empty() {
+                    w.push('\n');
+                }
+                for item in items {
+                    w.push('\t');
+                    item.generate_c(w);
+                    w.push(',');
+                    w.push('\n');
+                }
+                w.push('}');
+            }
+            Self::LiteralConstant { value } => {
+                w.push_str(&value);
+            }
+            Self::Cast {
+                target_type,
+                expression,
+            } => {
+                w.push('(');
+                target_type.generate_c(w);
+                w.push(')');
+                w.push(' ');
+                expression.generate_c(w);
+            }
+            Self::TypeDef {
+                source_type,
+                new_name,
+            } => {
+                w.push_str("typedef ");
+                // leaky abstraction / hack, doesn't fully solve the problem
+                if let CType::Function { .. } = source_type {
+                    source_type.generate_c_with_name(&format!("(*{})", new_name), w);
+                } else {
+                    source_type.generate_c(w);
+                    w.push(' ');
+                    w.push_str(&new_name);
+                }
+                w.push(';');
+                w.push('\n');
+            }
+        }
+    }
+}
+
+/// Generate C source code from some `CStatement`s into a String.
+// TODO: add config section
+pub fn generate_c(statements: &[CStatement]) -> String {
+    let mut out = String::new();
+    for statement in statements {
+        statement.generate_c(&mut out);
+    }
+    out
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn generate_types() {
+        macro_rules!
assert_c_type { + ($ctype:expr, $expected:expr) => { + let mut w = String::new(); + let ctype = $ctype; + ctype.generate_c(&mut w); + assert_eq!(w, $expected); + }; + } + + assert_c_type!(CType::Void, "void"); + assert_c_type!(CType::void_ptr(), "void*"); + assert_c_type!(CType::const_void_ptr(), "const void*"); + assert_c_type!(CType::U8, "unsigned char"); + assert_c_type!(CType::U16, "unsigned short"); + assert_c_type!(CType::U32, "unsigned int"); + assert_c_type!(CType::U64, "unsigned long long"); + assert_c_type!(CType::USize, "unsigned size_t"); + assert_c_type!(CType::I8, "char"); + assert_c_type!(CType::I16, "short"); + assert_c_type!(CType::I32, "int"); + assert_c_type!(CType::I64, "long long"); + assert_c_type!(CType::ISize, "size_t"); + assert_c_type!(CType::TypeDef("my_type".to_string()), "my_type"); + assert_c_type!( + CType::Function { + arguments: vec![CType::U8, CType::ISize], + return_value: None + }, + "void (*)(unsigned char, size_t)" + ); + assert_c_type!( + CType::Function { + arguments: vec![], + return_value: Some(Box::new(CType::ISize)) + }, + "size_t (*)()" + ); + assert_c_type!( + CType::PointerTo { + is_const: true, + inner: Box::new(CType::PointerTo { + is_const: false, + inner: Box::new(CType::U32) + }) + }, + "const unsigned int**" + ); + // TODO: test more complicated const correctness rules: there are bugs relating to it. + } + + #[test] + fn generate_types_with_names() { + macro_rules! assert_c_type { + ($ctype:expr, $name:literal, $expected:expr) => { + let mut w = String::new(); + let ctype = $ctype; + ctype.generate_c_with_name($name, &mut w); + assert_eq!(w, $expected); + }; + } + + assert_c_type!(CType::Void, "main", "void main"); + assert_c_type!(CType::void_ptr(), "data", "void* data"); + assert_c_type!(CType::const_void_ptr(), "data", "const void* data"); + assert_c_type!(CType::U8, "data", "unsigned char data"); + assert_c_type!(CType::U16, "data", "unsigned short data"); + assert_c_type!(CType::U32, "data", "unsigned int data"); + assert_c_type!(CType::U64, "data", "unsigned long long data"); + assert_c_type!(CType::USize, "data", "unsigned size_t data"); + assert_c_type!(CType::I8, "data", "char data"); + assert_c_type!(CType::I16, "data", "short data"); + assert_c_type!(CType::I32, "data", "int data"); + assert_c_type!(CType::I64, "data", "long long data"); + assert_c_type!(CType::ISize, "data", "size_t data"); + assert_c_type!( + CType::TypeDef("my_type".to_string()), + "data", + "my_type data" + ); + assert_c_type!( + CType::Function { + arguments: vec![CType::U8, CType::ISize], + return_value: None + }, + "my_func", + "void my_func(unsigned char, size_t)" + ); + assert_c_type!( + CType::Function { + arguments: vec![], + return_value: Some(Box::new(CType::ISize)) + }, + "my_func", + "size_t my_func()" + ); + assert_c_type!( + CType::PointerTo { + is_const: true, + inner: Box::new(CType::PointerTo { + is_const: false, + inner: Box::new(CType::U32) + }) + }, + "data", + "const unsigned int** data" + ); + // TODO: test more complicated const correctness rules: there are bugs relating to it. + } + + #[test] + fn generate_expressions_works() { + macro_rules! 
assert_c_expr { + ($cexpr:expr, $expected:expr) => { + let mut w = String::new(); + let cexpr = $cexpr; + cexpr.generate_c(&mut w); + assert_eq!(w, $expected); + }; + } + + assert_c_expr!( + CStatement::LiteralConstant { + value: "\"Hello, world!\"".to_string() + }, + "\"Hello, world!\"" + ); + assert_c_expr!( + CStatement::TypeDef { + source_type: CType::Function { + arguments: vec![CType::I32, CType::I32], + return_value: None, + }, + new_name: "my_func_ptr".to_string(), + }, + "typedef void (*my_func_ptr)(int, int);\n" + ); + assert_c_expr!( + CStatement::LiteralArray { + items: vec![ + CStatement::LiteralConstant { + value: "1".to_string() + }, + CStatement::LiteralConstant { + value: "2".to_string() + }, + CStatement::LiteralConstant { + value: "3".to_string() + }, + ] + }, + "{\n\t1,\n\t2,\n\t3,\n}" + ); + assert_c_expr!(CStatement::LiteralArray { items: vec![] }, "{}"); + assert_c_expr!( + CStatement::Declaration { + name: "my_array".to_string(), + is_extern: false, + is_const: true, + ctype: CType::Array { + inner: Box::new(CType::I32) + }, + definition: Some(Box::new(CStatement::LiteralArray { + items: vec![ + CStatement::LiteralConstant { + value: "1".to_string() + }, + CStatement::LiteralConstant { + value: "2".to_string() + }, + CStatement::LiteralConstant { + value: "3".to_string() + }, + ] + })) + }, + "const int my_array[] = {\n\t1,\n\t2,\n\t3,\n};\n" + ); + assert_c_expr!( + CStatement::Declaration { + name: "my_array".to_string(), + is_extern: true, + is_const: true, + ctype: CType::Array { + inner: Box::new(CType::I32) + }, + definition: None, + }, + "const extern int my_array[];\n" + ); + } +} diff --git a/lib/cli/src/c_gen/staticlib_header.rs b/lib/cli/src/c_gen/staticlib_header.rs new file mode 100644 index 0000000000..82933d003c --- /dev/null +++ b/lib/cli/src/c_gen/staticlib_header.rs @@ -0,0 +1,296 @@ +//! Generate a header file for the object file produced by the Staticlib engine. + +use super::{generate_c, CStatement, CType}; +use wasmer_compiler::{Symbol, SymbolRegistry}; +use wasmer_types::ModuleInfo; + +/// Helper functions to simplify the usage of the Staticlib engine. +const HELPER_FUNCTIONS: &str = r#" +wasm_byte_vec_t generate_serialized_data() { + // We need to pass all the bytes as one big buffer so we have to do all this logic to memcpy + // the various pieces together from the generated header file. + // + // We should provide a `deseralize_vectored` function to avoid requiring this extra work. 
+ + char* byte_ptr = (char*)&WASMER_METADATA[0]; + + size_t num_function_pointers + = sizeof(function_pointers) / sizeof(void*); + size_t num_function_trampolines + = sizeof(function_trampolines) / sizeof(void*); + size_t num_dynamic_function_trampoline_pointers + = sizeof(dynamic_function_trampoline_pointers) / sizeof(void*); + + + size_t buffer_size = module_bytes_len + + sizeof(size_t) + sizeof(function_pointers) + + sizeof(size_t) + sizeof(function_trampolines) + + sizeof(size_t) + sizeof(dynamic_function_trampoline_pointers); + + char* memory_buffer = (char*) malloc(buffer_size); + size_t current_offset = 0; + + memcpy(memory_buffer + current_offset, byte_ptr, module_bytes_len); + current_offset += module_bytes_len; + + memcpy(memory_buffer + current_offset, (void*)&num_function_pointers, sizeof(size_t)); + current_offset += sizeof(size_t); + + memcpy(memory_buffer + current_offset, (void*)&function_pointers[0], sizeof(function_pointers)); + current_offset += sizeof(function_pointers); + + memcpy(memory_buffer + current_offset, (void*)&num_function_trampolines, sizeof(size_t)); + current_offset += sizeof(size_t); + + memcpy(memory_buffer + current_offset, (void*)&function_trampolines[0], sizeof(function_trampolines)); + current_offset += sizeof(function_trampolines); + + memcpy(memory_buffer + current_offset, (void*)&num_dynamic_function_trampoline_pointers, sizeof(size_t)); + current_offset += sizeof(size_t); + + memcpy(memory_buffer + current_offset, (void*)&dynamic_function_trampoline_pointers[0], sizeof(dynamic_function_trampoline_pointers)); + current_offset += sizeof(dynamic_function_trampoline_pointers); + + wasm_byte_vec_t module_byte_vec = { + .size = buffer_size, + .data = memory_buffer, + }; + return module_byte_vec; +} + +wasm_module_t* wasmer_staticlib_engine_new(wasm_store_t* store, const char* wasm_name) { + // wasm_name intentionally unused for now: will be used in the future. + wasm_byte_vec_t module_byte_vec = generate_serialized_data(); + wasm_module_t* module = wasm_module_deserialize(store, &module_byte_vec); + free(module_byte_vec.data); + + return module; +} +"#; + +/// Generate the header file that goes with the generated object file. 
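+///
+/// The generated header declares `module_bytes_len`, the `WASMER_METADATA`
+/// byte array, the per-function and per-signature symbol tables, and finally
+/// appends `HELPER_FUNCTIONS` so the object file can be deserialized into a
+/// `wasm_module_t` at runtime.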
+pub fn generate_header_file(
+    module_info: &ModuleInfo,
+    symbol_registry: &dyn SymbolRegistry,
+    metadata_length: usize,
+) -> String {
+    let mut c_statements = vec![];
+    c_statements.push(CStatement::LiteralConstant {
+        value: "#include <stdlib.h>\n#include <string.h>\n\n".to_string(),
+    });
+    c_statements.push(CStatement::LiteralConstant {
+        value: "#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n".to_string(),
+    });
+    c_statements.push(CStatement::Declaration {
+        name: "module_bytes_len".to_string(),
+        is_extern: false,
+        is_const: true,
+        ctype: CType::U32,
+        definition: Some(Box::new(CStatement::LiteralConstant {
+            value: metadata_length.to_string(),
+        })),
+    });
+    c_statements.push(CStatement::Declaration {
+        name: "WASMER_METADATA".to_string(),
+        is_extern: true,
+        is_const: true,
+        ctype: CType::Array {
+            inner: Box::new(CType::U8),
+        },
+        definition: None,
+    });
+    let function_declarations = module_info
+        .functions
+        .iter()
+        .filter_map(|(f_index, sig_index)| {
+            Some((module_info.local_func_index(f_index)?, sig_index))
+        })
+        .map(|(function_local_index, _sig_index)| {
+            let function_name =
+                symbol_registry.symbol_to_name(Symbol::LocalFunction(function_local_index));
+            // TODO: figure out the signature here too
+            CStatement::Declaration {
+                name: function_name,
+                is_extern: false,
+                is_const: false,
+                ctype: CType::Function {
+                    arguments: vec![CType::Void],
+                    return_value: None,
+                },
+                definition: None,
+            }
+        });
+    c_statements.push(CStatement::LiteralConstant {
+        value: r#"
+// Compiled Wasm function pointers ordered by function index: the order in
+// which they appeared in the Wasm module.
+"#
+        .to_string(),
+    });
+    c_statements.extend(function_declarations);
+
+    // function pointer array
+    {
+        let function_pointer_array_statements = module_info
+            .functions
+            .iter()
+            .filter_map(|(f_index, sig_index)| {
+                Some((module_info.local_func_index(f_index)?, sig_index))
+            })
+            .map(|(function_local_index, _sig_index)| {
+                let function_name =
+                    symbol_registry.symbol_to_name(Symbol::LocalFunction(function_local_index));
+                // TODO: figure out the signature here too
+
+                CStatement::Cast {
+                    target_type: CType::void_ptr(),
+                    expression: Box::new(CStatement::LiteralConstant {
+                        value: function_name,
+                    }),
+                }
+            })
+            .collect::<Vec<_>>();
+
+        c_statements.push(CStatement::Declaration {
+            name: "function_pointers".to_string(),
+            is_extern: false,
+            is_const: true,
+            ctype: CType::Array {
+                inner: Box::new(CType::void_ptr()),
+            },
+            definition: Some(Box::new(CStatement::LiteralArray {
+                items: function_pointer_array_statements,
+            })),
+        });
+    }
+
+    let func_trampoline_declarations =
+        module_info
+            .signatures
+            .iter()
+            .map(|(sig_index, _func_type)| {
+                let function_name =
+                    symbol_registry.symbol_to_name(Symbol::FunctionCallTrampoline(sig_index));
+
+                CStatement::Declaration {
+                    name: function_name,
+                    is_extern: false,
+                    is_const: false,
+                    ctype: CType::Function {
+                        arguments: vec![CType::void_ptr(), CType::void_ptr(), CType::void_ptr()],
+                        return_value: None,
+                    },
+                    definition: None,
+                }
+            });
+    c_statements.push(CStatement::LiteralConstant {
+        value: r#"
+// Trampolines (functions by which we can call into Wasm) ordered by signature.
+// There is 1 trampoline per function signature in the order they appear in
+// the Wasm module.
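+// Each trampoline is declared below with the shape void (*)(void*, void*, void*).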
+"#
+        .to_string(),
+    });
+    c_statements.extend(func_trampoline_declarations);
+
+    // function trampolines
+    {
+        let function_trampoline_statements = module_info
+            .signatures
+            .iter()
+            .map(|(sig_index, _vm_shared_index)| {
+                let function_name =
+                    symbol_registry.symbol_to_name(Symbol::FunctionCallTrampoline(sig_index));
+                CStatement::LiteralConstant {
+                    value: function_name,
+                }
+            })
+            .collect::<Vec<_>>();
+
+        c_statements.push(CStatement::Declaration {
+            name: "function_trampolines".to_string(),
+            is_extern: false,
+            is_const: true,
+            ctype: CType::Array {
+                inner: Box::new(CType::void_ptr()),
+            },
+            definition: Some(Box::new(CStatement::LiteralArray {
+                items: function_trampoline_statements,
+            })),
+        });
+    }
+
+    let dyn_func_declarations = module_info
+        .functions
+        .keys()
+        .take(module_info.num_imported_functions)
+        .map(|func_index| {
+            let function_name =
+                symbol_registry.symbol_to_name(Symbol::DynamicFunctionTrampoline(func_index));
+            // TODO: figure out the signature here
+            CStatement::Declaration {
+                name: function_name,
+                is_extern: false,
+                is_const: false,
+                ctype: CType::Function {
+                    arguments: vec![CType::void_ptr(), CType::void_ptr(), CType::void_ptr()],
+                    return_value: None,
+                },
+                definition: None,
+            }
+        });
+    c_statements.push(CStatement::LiteralConstant {
+        value: r#"
+// Dynamic trampolines are per-function and are used for each function where
+// the type signature is not known statically. In this case, this corresponds to
+// the imported functions.
+"#
+        .to_string(),
+    });
+    c_statements.extend(dyn_func_declarations);
+
+    c_statements.push(CStatement::TypeDef {
+        source_type: CType::Function {
+            arguments: vec![CType::void_ptr(), CType::void_ptr(), CType::void_ptr()],
+            return_value: None,
+        },
+        new_name: "dyn_func_trampoline_t".to_string(),
+    });
+
+    // dynamic function trampoline pointer array
+    {
+        let dynamic_function_trampoline_statements = module_info
+            .functions
+            .keys()
+            .take(module_info.num_imported_functions)
+            .map(|func_index| {
+                let function_name =
+                    symbol_registry.symbol_to_name(Symbol::DynamicFunctionTrampoline(func_index));
+                CStatement::LiteralConstant {
+                    value: function_name,
+                }
+            })
+            .collect::<Vec<_>>();
+        c_statements.push(CStatement::Declaration {
+            name: "dynamic_function_trampoline_pointers".to_string(),
+            is_extern: false,
+            is_const: true,
+            ctype: CType::Array {
+                inner: Box::new(CType::TypeDef("dyn_func_trampoline_t".to_string())),
+            },
+            definition: Some(Box::new(CStatement::LiteralArray {
+                items: dynamic_function_trampoline_statements,
+            })),
+        });
+    }
+
+    c_statements.push(CStatement::LiteralConstant {
+        value: HELPER_FUNCTIONS.to_string(),
+    });
+
+    c_statements.push(CStatement::LiteralConstant {
+        value: "\n#ifdef __cplusplus\n}\n#endif\n\n".to_string(),
+    });
+
+    generate_c(&c_statements)
+}
diff --git a/lib/cli/src/cli.rs b/lib/cli/src/cli.rs
new file mode 100644
index 0000000000..f7bbb5cdf8
--- /dev/null
+++ b/lib/cli/src/cli.rs
@@ -0,0 +1,136 @@
+//! The logic for the Wasmer CLI tool.
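+//!
+//! Each subcommand is a `StructOpt` type defined in `crate::commands`;
+//! `wasmer_main` parses the command line and dispatches to the matching
+//! subcommand's `execute` method.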
+ +#[cfg(target_os = "linux")] +use crate::commands::Binfmt; +#[cfg(feature = "compiler")] +use crate::commands::Compile; +#[cfg(all(feature = "staticlib", feature = "compiler"))] +use crate::commands::CreateExe; +#[cfg(feature = "wast")] +use crate::commands::Wast; +use crate::commands::{Cache, Config, Inspect, Run, SelfUpdate, Validate}; +use crate::error::PrettyError; +use anyhow::Result; + +use structopt::{clap::ErrorKind, StructOpt}; + +#[derive(StructOpt)] +#[cfg_attr( + not(feature = "headless"), + structopt(name = "wasmer", about = "WebAssembly standalone runtime.", author) +)] +#[cfg_attr( + feature = "headless", + structopt( + name = "wasmer-headless", + about = "Headless WebAssembly standalone runtime.", + author + ) +)] +/// The options for the wasmer Command Line Interface +enum WasmerCLIOptions { + /// Run a WebAssembly file. Formats accepted: wasm, wat + #[structopt(name = "run")] + Run(Run), + + /// Wasmer cache + #[structopt(name = "cache")] + Cache(Cache), + + /// Validate a WebAssembly binary + #[structopt(name = "validate")] + Validate(Validate), + + /// Compile a WebAssembly binary + #[cfg(feature = "compiler")] + #[structopt(name = "compile")] + Compile(Compile), + + /// Compile a WebAssembly binary into a native executable + #[cfg(all(feature = "staticlib", feature = "compiler"))] + #[structopt(name = "create-exe")] + CreateExe(CreateExe), + + /// Get various configuration information needed + /// to compile programs which use Wasmer + #[structopt(name = "config")] + Config(Config), + + /// Update wasmer to the latest version + #[structopt(name = "self-update")] + SelfUpdate(SelfUpdate), + + /// Inspect a WebAssembly file + #[structopt(name = "inspect")] + Inspect(Inspect), + + /// Run spec testsuite + #[cfg(feature = "wast")] + #[structopt(name = "wast")] + Wast(Wast), + + /// Unregister and/or register wasmer as binfmt interpreter + #[cfg(target_os = "linux")] + #[structopt(name = "binfmt")] + Binfmt(Binfmt), +} + +impl WasmerCLIOptions { + fn execute(&self) -> Result<()> { + match self { + Self::Run(options) => options.execute(), + Self::SelfUpdate(options) => options.execute(), + Self::Cache(cache) => cache.execute(), + Self::Validate(validate) => validate.execute(), + #[cfg(feature = "compiler")] + Self::Compile(compile) => compile.execute(), + #[cfg(all(feature = "staticlib", feature = "compiler"))] + Self::CreateExe(create_exe) => create_exe.execute(), + Self::Config(config) => config.execute(), + Self::Inspect(inspect) => inspect.execute(), + #[cfg(feature = "wast")] + Self::Wast(wast) => wast.execute(), + #[cfg(target_os = "linux")] + Self::Binfmt(binfmt) => binfmt.execute(), + } + } +} + +/// The main function for the Wasmer CLI tool. +pub fn wasmer_main() { + // We allow windows to print properly colors + #[cfg(windows)] + colored::control::set_virtual_terminal(true).unwrap(); + + // We try to run wasmer with the normal arguments. + // Eg. `wasmer ` + // In case that fails, we fallback trying the Run subcommand directly. + // Eg. 
+    // `wasmer myfile.wasm --dir=.`
+    //
+    // In case we've been run as wasmer-binfmt-interpreter myfile.wasm args,
+    // we assume that we're registered via binfmt_misc
+    let args = std::env::args().collect::<Vec<_>>();
+    let binpath = args.get(0).map(|s| s.as_ref()).unwrap_or("");
+    let command = args.get(1);
+    let options = if cfg!(target_os = "linux") && binpath.ends_with("wasmer-binfmt-interpreter") {
+        WasmerCLIOptions::Run(Run::from_binfmt_args())
+    } else {
+        match command.unwrap_or(&"".to_string()).as_ref() {
+            "cache" | "compile" | "config" | "create-exe" | "help" | "inspect" | "run"
+            | "self-update" | "validate" | "wast" | "binfmt" => WasmerCLIOptions::from_args(),
+            _ => {
+                WasmerCLIOptions::from_iter_safe(args.iter()).unwrap_or_else(|e| {
+                    match e.kind {
+                        // This fixes an issue where:
+                        // 1. The version is shown twice when doing `wasmer -V`
+                        // 2. The run help (instead of the normal help) is shown when doing `wasmer --help`
+                        ErrorKind::VersionDisplayed | ErrorKind::HelpDisplayed => e.exit(),
+                        _ => WasmerCLIOptions::Run(Run::from_args()),
+                    }
+                })
+            }
+        }
+    };
+
+    PrettyError::report(options.execute());
+}
diff --git a/lib/cli/src/commands.rs b/lib/cli/src/commands.rs
new file mode 100644
index 0000000000..b0b53c0521
--- /dev/null
+++ b/lib/cli/src/commands.rs
@@ -0,0 +1,25 @@
+//! The commands available in the Wasmer binary.
+#[cfg(target_os = "linux")]
+mod binfmt;
+mod cache;
+#[cfg(feature = "compiler")]
+mod compile;
+mod config;
+#[cfg(all(feature = "staticlib", feature = "compiler"))]
+mod create_exe;
+mod inspect;
+mod run;
+mod self_update;
+mod validate;
+#[cfg(feature = "wast")]
+mod wast;
+
+#[cfg(target_os = "linux")]
+pub use binfmt::*;
+#[cfg(feature = "compiler")]
+pub use compile::*;
+#[cfg(all(feature = "staticlib", feature = "compiler"))]
+pub use create_exe::*;
+#[cfg(feature = "wast")]
+pub use wast::*;
+pub use {cache::*, config::*, inspect::*, run::*, self_update::*, validate::*};
diff --git a/lib/cli/src/commands/binfmt.rs b/lib/cli/src/commands/binfmt.rs
new file mode 100644
index 0000000000..e668740a6d
--- /dev/null
+++ b/lib/cli/src/commands/binfmt.rs
@@ -0,0 +1,153 @@
+use anyhow::{Context, Result};
+use std::env;
+use std::fs;
+use std::io::Write;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::fs::MetadataExt;
+use std::path::{Path, PathBuf};
+use structopt::StructOpt;
+use Action::*;
+
+#[derive(StructOpt, Clone, Copy)]
+enum Action {
+    /// Register wasmer as binfmt interpreter
+    Register,
+    /// Unregister a binfmt interpreter for wasm32
+    Unregister,
+    /// Soft unregister, and register
+    Reregister,
+}
+
+/// Unregister and/or register wasmer as binfmt interpreter
+///
+/// Check the wasmer repository for a systemd service definition example
+/// to automate the process at start-up.
+#[derive(StructOpt)]
+pub struct Binfmt {
+    // Might be better to traverse the mount list
+    /// Mount point of binfmt_misc fs
+    #[structopt(long, default_value = "/proc/sys/fs/binfmt_misc/")]
+    binfmt_misc: PathBuf,
+
+    #[structopt(subcommand)]
+    action: Action,
+}
+
+// Quick safety check:
+// This folder isn't world writeable (or else its sticky bit is set), and neither are its parents.
+//
+// If somebody mounted /tmp wrong, this might result in a TOCTOU problem.
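+//
+// (A standard /tmp, mode 1777, passes this check only because of its sticky bit.)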
+fn seccheck(path: &Path) -> Result<()> {
+    if let Some(parent) = path.parent() {
+        seccheck(parent)?;
+    }
+    let m = std::fs::metadata(path)
+        .with_context(|| format!("Can't check permissions of {}", path.to_string_lossy()))?;
+    anyhow::ensure!(
+        m.mode() & 0o2 == 0 || m.mode() & 0o1000 != 0,
+        "{} is world writeable and not sticky",
+        path.to_string_lossy()
+    );
+    Ok(())
+}
+
+impl Binfmt {
+    /// execute [Binfmt]
+    pub fn execute(&self) -> Result<()> {
+        if !self.binfmt_misc.exists() {
+            panic!("{} does not exist", self.binfmt_misc.to_string_lossy());
+        }
+        let temp_dir;
+        let specs = match self.action {
+            Register | Reregister => {
+                temp_dir = tempfile::tempdir().context("Make temporary directory")?;
+                seccheck(temp_dir.path())?;
+                let bin_path_orig: PathBuf = env::args_os()
+                    .nth(0)
+                    .map(Into::into)
+                    .filter(|p: &PathBuf| p.exists())
+                    .context("Cannot get path to wasmer executable")?;
+                let bin_path = temp_dir.path().join("wasmer-binfmt-interpreter");
+                fs::copy(&bin_path_orig, &bin_path).context("Copy wasmer binary to temp folder")?;
+                let bin_path = fs::canonicalize(&bin_path).with_context(|| {
+                    format!(
+                        "Couldn't get absolute path for {}",
+                        bin_path.to_string_lossy()
+                    )
+                })?;
+                Some([
+                    [
+                        b":wasm32:M::\\x00asm\\x01\\x00\\x00::".as_ref(),
+                        bin_path.as_os_str().as_bytes(),
+                        b":PFC",
+                    ]
+                    .concat(),
+                    [
+                        b":wasm32-wat:E::wat::".as_ref(),
+                        bin_path.as_os_str().as_bytes(),
+                        b":PFC",
+                    ]
+                    .concat(),
+                ])
+            }
+            _ => None,
+        };
+        let wasm_registration = self.binfmt_misc.join("wasm32");
+        let wat_registration = self.binfmt_misc.join("wasm32-wat");
+        match self.action {
+            Reregister | Unregister => {
+                let unregister = [wasm_registration, wat_registration]
+                    .iter()
+                    .map(|registration| {
+                        if registration.exists() {
+                            let mut registration = fs::OpenOptions::new()
+                                .write(true)
+                                .open(registration)
+                                .context("Open existing binfmt entry to remove")?;
+                            registration
+                                .write_all(b"-1")
+                                .context("Couldn't write binfmt unregister request")?;
+                            Ok(true)
+                        } else {
+                            eprintln!(
+                                "Warning: {} does not exist, not unregistered.",
+                                registration.to_string_lossy()
+                            );
+                            Ok(false)
+                        }
+                    })
+                    .collect::<Vec<_>>()
+                    .into_iter()
+                    .collect::<Result<Vec<bool>>>()?;
+                match (self.action, unregister.into_iter().any(|b| b)) {
+                    (Unregister, false) => bail!("Nothing unregistered"),
+                    _ => (),
+                }
+            }
+            _ => (),
+        };
+        if let Some(specs) = specs {
+            if cfg!(target_env = "gnu") {
+                // Approximate. ELF parsing for a proper check feels like overkill here.
+                eprintln!("Warning: wasmer has been compiled for glibc, and is thus likely dynamically linked. Invoking wasm binaries in chroots or mount namespaces (lxc, docker, ...) may not work.");
+            }
+            specs
+                .iter()
+                .map(|spec| {
+                    let register = self.binfmt_misc.join("register");
+                    let mut register = fs::OpenOptions::new()
+                        .write(true)
+                        .open(register)
+                        .context("Open binfmt misc for registration")?;
+                    register
+                        .write_all(&spec)
+                        .context("Couldn't register binfmt")?;
+                    Ok(())
+                })
+                .collect::<Vec<_>>()
+                .into_iter()
+                .collect::<Result<Vec<()>>>()?;
+        }
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/commands/cache.rs b/lib/cli/src/commands/cache.rs
new file mode 100644
index 0000000000..f5aa21be8e
--- /dev/null
+++ b/lib/cli/src/commands/cache.rs
@@ -0,0 +1,44 @@
+use crate::common::get_cache_dir;
+use anyhow::{Context, Result};
+use std::fs;
+use structopt::StructOpt;
+
+#[derive(Debug, StructOpt)]
+/// The options for the `wasmer cache` subcommand
+pub enum Cache {
+    /// Clear the cache
+    #[structopt(name = "clean")]
+    Clean,
+
+    /// Display the location of the cache
+    #[structopt(name = "dir")]
+    Dir,
+}
+
+impl Cache {
+    /// Execute the cache command
+    pub fn execute(&self) -> Result<()> {
+        match &self {
+            Cache::Clean => {
+                self.clean().context("failed to clean wasmer cache.")?;
+            }
+            Cache::Dir => {
+                self.dir()?;
+            }
+        }
+        Ok(())
+    }
+    fn clean(&self) -> Result<()> {
+        let cache_dir = get_cache_dir();
+        if cache_dir.exists() {
+            fs::remove_dir_all(cache_dir.clone())?;
+        }
+        fs::create_dir_all(cache_dir)?;
+        eprintln!("Wasmer cache cleaned successfully.");
+        Ok(())
+    }
+    fn dir(&self) -> Result<()> {
+        println!("{}", get_cache_dir().to_string_lossy());
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/commands/compile.rs b/lib/cli/src/commands/compile.rs
new file mode 100644
index 0000000000..76ada5ebbc
--- /dev/null
+++ b/lib/cli/src/commands/compile.rs
@@ -0,0 +1,148 @@
+use crate::store::{EngineType, StoreOptions};
+use crate::warning;
+use anyhow::{Context, Result};
+use std::path::PathBuf;
+use structopt::StructOpt;
+use wasmer::*;
+
+#[derive(Debug, StructOpt)]
+/// The options for the `wasmer compile` subcommand
+pub struct Compile {
+    /// Input file
+    #[structopt(name = "FILE", parse(from_os_str))]
+    path: PathBuf,
+
+    /// Output file
+    #[structopt(name = "OUTPUT PATH", short = "o", parse(from_os_str))]
+    output: PathBuf,
+
+    /// Output path for generated header file
+    #[structopt(name = "HEADER PATH", long = "header", parse(from_os_str))]
+    header_path: Option<PathBuf>,
+
+    /// Compilation Target triple
+    #[structopt(long = "target")]
+    target_triple: Option<Triple>,
+
+    #[structopt(flatten)]
+    store: StoreOptions,
+
+    #[structopt(short = "m", multiple = true, number_of_values = 1)]
+    cpu_features: Vec<CpuFeature>,
+}
+
+impl Compile {
+    /// Runs logic for the `compile` subcommand
+    pub fn execute(&self) -> Result<()> {
+        self.inner_execute()
+            .context(format!("failed to compile `{}`", self.path.display()))
+    }
+
+    pub(crate) fn get_recommend_extension(
+        engine_type: &EngineType,
+        target_triple: &Triple,
+    ) -> Result<&'static str> {
+        Ok(match engine_type {
+            #[cfg(feature = "dylib")]
+            EngineType::Dylib => {
+                wasmer_engine_dylib::DylibArtifact::get_default_extension(target_triple)
+            }
+            #[cfg(feature = "universal")]
+            EngineType::Universal => {
+                wasmer_engine_universal::UniversalArtifact::get_default_extension(target_triple)
+            }
+            #[cfg(feature = "staticlib")]
+            EngineType::Staticlib => {
+                wasmer_engine_staticlib::StaticlibArtifact::get_default_extension(target_triple)
+            }
+            #[cfg(not(all(feature = "dylib", feature = "universal", feature = "staticlib")))]
+            _ => bail!("selected engine type is not compiled in"),
+        })
+    }
+
+    fn inner_execute(&self) -> Result<()> {
+        let target = self
.target_triple + .as_ref() + .map(|target_triple| { + let mut features = self + .cpu_features + .clone() + .into_iter() + .fold(CpuFeature::set(), |a, b| a | b); + // Cranelift requires SSE2, so we have this "hack" for now to facilitate + // usage + features |= CpuFeature::SSE2; + Target::new(target_triple.clone(), features) + }) + .unwrap_or_default(); + let (store, engine_type, compiler_type) = + self.store.get_store_for_target(target.clone())?; + let output_filename = self + .output + .file_stem() + .map(|osstr| osstr.to_string_lossy().to_string()) + .unwrap_or_default(); + let recommended_extension = Self::get_recommend_extension(&engine_type, target.triple())?; + match self.output.extension() { + Some(ext) => { + if ext != recommended_extension { + warning!("the output file has a wrong extension. We recommend using `{}.{}` for the chosen target", &output_filename, &recommended_extension) + } + } + None => { + warning!("the output file has no extension. We recommend using `{}.{}` for the chosen target", &output_filename, &recommended_extension) + } + } + println!("Engine: {}", engine_type.to_string()); + println!("Compiler: {}", compiler_type.to_string()); + println!("Target: {}", target.triple()); + + let module = Module::from_file(&store, &self.path)?; + + let _ = module.serialize_to_file(&self.output)?; + eprintln!( + "✔ File compiled successfully to `{}`.", + self.output.display(), + ); + + #[cfg(feature = "staticlib")] + if engine_type == EngineType::Staticlib { + let artifact: &wasmer_engine_staticlib::StaticlibArtifact = + module.artifact().as_ref().downcast_ref().context("Engine type is Staticlib but could not downcast artifact into StaticlibArtifact")?; + let symbol_registry = artifact.symbol_registry(); + let metadata_length = artifact.metadata_length(); + let module_info = module.info(); + let header_file_src = crate::c_gen::staticlib_header::generate_header_file( + module_info, + symbol_registry, + metadata_length, + ); + + let header_path = self.header_path.as_ref().cloned().unwrap_or_else(|| { + let mut hp = PathBuf::from( + self.path + .file_stem() + .map(|fs| fs.to_string_lossy().to_string()) + .unwrap_or_else(|| "wasm_out".to_string()), + ); + hp.set_extension("h"); + hp + }); + // for C code + let mut header = std::fs::OpenOptions::new() + .create(true) + .truncate(true) + .write(true) + .open(&header_path)?; + + use std::io::Write; + header.write_all(header_file_src.as_bytes())?; + eprintln!( + "✔ Header file generated successfully at `{}`.", + header_path.display(), + ); + } + Ok(()) + } +} diff --git a/lib/cli/src/commands/config.rs b/lib/cli/src/commands/config.rs new file mode 100644 index 0000000000..0ded2bf454 --- /dev/null +++ b/lib/cli/src/commands/config.rs @@ -0,0 +1,102 @@ +use crate::VERSION; +use anyhow::{Context, Result}; +use std::env; +use std::path::PathBuf; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +/// The options for the `wasmer config` subcommand +pub struct Config { + /// Print the installation prefix. + #[structopt(long, conflicts_with = "pkg-config")] + prefix: bool, + + /// Directory containing Wasmer executables. + #[structopt(long, conflicts_with = "pkg-config")] + bindir: bool, + + /// Directory containing Wasmer headers. + #[structopt(long, conflicts_with = "pkg-config")] + includedir: bool, + + /// Directory containing Wasmer libraries. + #[structopt(long, conflicts_with = "pkg-config")] + libdir: bool, + + /// Libraries needed to link against Wasmer components. 
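+    /// (Printed as `-L<libdir> -lwasmer`.)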
+    #[structopt(long, conflicts_with = "pkg-config")]
+    libs: bool,
+
+    /// C compiler flags for files that include Wasmer headers.
+    #[structopt(long, conflicts_with = "pkg-config")]
+    cflags: bool,
+
+    /// Outputs the necessary details for compiling and linking a program
+    /// against Wasmer, using the `pkg-config` format.
+    #[structopt(long)]
+    pkg_config: bool,
+}
+
+impl Config {
+    /// Runs logic for the `config` subcommand
+    pub fn execute(&self) -> Result<()> {
+        self.inner_execute()
+            .context("failed to retrieve the wasmer config".to_string())
+    }
+    fn inner_execute(&self) -> Result<()> {
+        let key = "WASMER_DIR";
+        let wasmer_dir = env::var(key)
+            .or_else(|e| {
+                option_env!("WASMER_INSTALL_PREFIX")
+                    .map(str::to_string)
+                    .ok_or(e)
+            })
+            .context(format!(
+                "failed to retrieve the {} environment variable",
+                key
+            ))?;
+
+        let prefix = PathBuf::from(wasmer_dir);
+
+        let prefixdir = prefix.display().to_string();
+        let bindir = prefix.join("bin").display().to_string();
+        let includedir = prefix.join("include").display().to_string();
+        let libdir = prefix.join("lib").display().to_string();
+        let cflags = format!("-I{}", includedir);
+        let libs = format!("-L{} -lwasmer", libdir);
+
+        if self.pkg_config {
+            println!("prefix={}", prefixdir);
+            println!("exec_prefix={}", bindir);
+            println!("includedir={}", includedir);
+            println!("libdir={}", libdir);
+            println!();
+            println!("Name: wasmer");
+            println!("Description: The Wasmer library for running WebAssembly");
+            println!("Version: {}", VERSION);
+            println!("Cflags: {}", cflags);
+            println!("Libs: {}", libs);
+            return Ok(());
+        }
+
+        if self.prefix {
+            println!("{}", prefixdir);
+        }
+        if self.bindir {
+            println!("{}", bindir);
+        }
+        if self.includedir {
+            println!("{}", includedir);
+        }
+        if self.libdir {
+            println!("{}", libdir);
+        }
+        if self.libs {
+            println!("{}", libs);
+        }
+        if self.cflags {
+            println!("{}", cflags);
+        }
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/commands/create_exe.rs b/lib/cli/src/commands/create_exe.rs
new file mode 100644
index 0000000000..bfdded6fab
--- /dev/null
+++ b/lib/cli/src/commands/create_exe.rs
@@ -0,0 +1,316 @@
+//! Create a standalone native executable for a given Wasm file.
+
+use crate::store::{CompilerOptions, EngineType};
+use anyhow::{Context, Result};
+use std::env;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+use structopt::StructOpt;
+use wasmer::*;
+
+const WASMER_MAIN_C_SOURCE: &[u8] = include_bytes!("wasmer_create_exe_main.c");
+
+#[derive(Debug, StructOpt)]
+/// The options for the `wasmer create-exe` subcommand
+pub struct CreateExe {
+    /// Input file
+    #[structopt(name = "FILE", parse(from_os_str))]
+    path: PathBuf,
+
+    /// Output file
+    #[structopt(name = "OUTPUT PATH", short = "o", parse(from_os_str))]
+    output: PathBuf,
+
+    /// Compilation Target triple
+    #[structopt(long = "target")]
+    target_triple: Option<Triple>,
+
+    #[structopt(flatten)]
+    compiler: CompilerOptions,
+
+    #[structopt(short = "m", multiple = true, number_of_values = 1)]
+    cpu_features: Vec<CpuFeature>,
+
+    /// Additional libraries to link against.
+    /// This is useful for fixing linker errors that may occur on some systems.
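+    /// Each name is passed to the linker as `-l<name>`.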
+    #[structopt(short = "l", multiple = true, number_of_values = 1)]
+    libraries: Vec<String>,
+}
+
+impl CreateExe {
+    /// Runs logic for the `create-exe` subcommand
+    pub fn execute(&self) -> Result<()> {
+        let target = self
+            .target_triple
+            .as_ref()
+            .map(|target_triple| {
+                let mut features = self
+                    .cpu_features
+                    .clone()
+                    .into_iter()
+                    .fold(CpuFeature::set(), |a, b| a | b);
+                // Cranelift requires SSE2, so we have this "hack" for now to facilitate
+                // usage
+                features |= CpuFeature::SSE2;
+                Target::new(target_triple.clone(), features)
+            })
+            .unwrap_or_default();
+        let engine_type = EngineType::Staticlib;
+        let (store, compiler_type) = self
+            .compiler
+            .get_store_for_target_and_engine(target.clone(), engine_type)?;
+
+        println!("Engine: {}", engine_type.to_string());
+        println!("Compiler: {}", compiler_type.to_string());
+        println!("Target: {}", target.triple());
+
+        let working_dir = tempfile::tempdir()?;
+        let starting_cd = env::current_dir()?;
+        let output_path = starting_cd.join(&self.output);
+        env::set_current_dir(&working_dir)?;
+
+        #[cfg(not(windows))]
+        let wasm_object_path = PathBuf::from("wasm.o");
+        #[cfg(windows)]
+        let wasm_object_path = PathBuf::from("wasm.obj");
+
+        let wasm_module_path = starting_cd.join(&self.path);
+
+        let module =
+            Module::from_file(&store, &wasm_module_path).context("failed to compile Wasm")?;
+        let _ = module.serialize_to_file(&wasm_object_path)?;
+
+        let artifact: &wasmer_engine_staticlib::StaticlibArtifact =
+            module.artifact().as_ref().downcast_ref().context(
+                "Engine type is Staticlib but could not downcast artifact into StaticlibArtifact",
+            )?;
+        let symbol_registry = artifact.symbol_registry();
+        let metadata_length = artifact.metadata_length();
+        let module_info = module.info();
+        let header_file_src = crate::c_gen::staticlib_header::generate_header_file(
+            module_info,
+            symbol_registry,
+            metadata_length,
+        );
+
+        generate_header(header_file_src.as_bytes())?;
+        self.compile_c(wasm_object_path, output_path)?;
+
+        eprintln!(
+            "✔ Native executable compiled successfully to `{}`.",
+            self.output.display(),
+        );
+
+        Ok(())
+    }
+
+    fn compile_c(&self, wasm_object_path: PathBuf, output_path: PathBuf) -> anyhow::Result<()> {
+        use std::io::Write;
+
+        // write C src to disk
+        let c_src_path = Path::new("wasmer_main.c");
+        #[cfg(not(windows))]
+        let c_src_obj = PathBuf::from("wasmer_main.o");
+        #[cfg(windows)]
+        let c_src_obj = PathBuf::from("wasmer_main.obj");
+
+        {
+            let mut c_src_file = fs::OpenOptions::new()
+                .create_new(true)
+                .write(true)
+                .open(&c_src_path)
+                .context("Failed to open C source code file")?;
+            c_src_file.write_all(WASMER_MAIN_C_SOURCE)?;
+        }
+        run_c_compile(&c_src_path, &c_src_obj, self.target_triple.clone())
+            .context("Failed to compile C source code")?;
+        LinkCode {
+            object_paths: vec![c_src_obj, wasm_object_path],
+            output_path,
+            additional_libraries: self.libraries.clone(),
+            target: self.target_triple.clone(),
+            ..Default::default()
+        }
+        .run()
+        .context("Failed to link objects together")?;
+
+        Ok(())
+    }
+}
+
+fn generate_header(header_file_src: &[u8]) -> anyhow::Result<()> {
+    let header_file_path = Path::new("my_wasm.h");
+    let mut header = std::fs::OpenOptions::new()
+        .create(true)
+        .truncate(true)
+        .write(true)
+        .open(&header_file_path)?;
+
+    use std::io::Write;
+    header.write_all(header_file_src)?;
+
+    Ok(())
+}
+
+fn get_wasmer_dir() -> anyhow::Result<PathBuf> {
+    Ok(PathBuf::from(
+        env::var("WASMER_DIR")
+            .or_else(|e| {
+                option_env!("WASMER_INSTALL_PREFIX")
+                    .map(str::to_string)
+                    .ok_or(e)
+            })
+            .context("Trying to read env var `WASMER_DIR`")?,
+    ))
+}
+
+fn get_wasmer_include_directory() -> anyhow::Result<PathBuf> {
+    let mut path = get_wasmer_dir()?;
+    path.push("include");
+    Ok(path)
+}
+
+/// path to the static libwasmer
+fn get_libwasmer_path() -> anyhow::Result<PathBuf> {
+    let mut path = get_wasmer_dir()?;
+    path.push("lib");
+
+    // TODO: prefer headless Wasmer if/when it's a separate library.
+    #[cfg(not(windows))]
+    path.push("libwasmer.a");
+    #[cfg(windows)]
+    path.push("wasmer.lib");
+
+    Ok(path)
+}
+
+/// Compile the C code.
+fn run_c_compile(
+    path_to_c_src: &Path,
+    output_name: &Path,
+    target: Option<Triple>,
+) -> anyhow::Result<()> {
+    #[cfg(not(windows))]
+    let c_compiler = "cc";
+    // We must use a C++ compiler on Windows because wasm.h uses `static_assert`
+    // which isn't available in `clang` on Windows.
+    #[cfg(windows)]
+    let c_compiler = "clang++";
+
+    let mut command = Command::new(c_compiler);
+    let command = command
+        .arg("-O2")
+        .arg("-c")
+        .arg(path_to_c_src)
+        .arg("-I")
+        .arg(get_wasmer_include_directory()?);
+
+    let command = if let Some(target) = target {
+        command.arg("-target").arg(format!("{}", target))
+    } else {
+        command
+    };
+
+    let output = command.arg("-o").arg(output_name).output()?;
+
+    if !output.status.success() {
+        bail!(
+            "C code compile failed with: stdout: {}\n\nstderr: {}",
+            std::str::from_utf8(&output.stdout)
+                .expect("stdout is not utf8! need to handle arbitrary bytes"),
+            std::str::from_utf8(&output.stderr)
+                .expect("stderr is not utf8! need to handle arbitrary bytes")
+        );
+    }
+    Ok(())
+}
+
+/// Data used to run a linking command for generated artifacts.
+#[derive(Debug)]
+struct LinkCode {
+    /// Path to the linker used to run the linking command.
+    linker_path: PathBuf,
+    /// String used as an optimization flag.
+    optimization_flag: String,
+    /// Paths of objects to link.
+    object_paths: Vec<PathBuf>,
+    /// Additional libraries to link against.
+    additional_libraries: Vec<String>,
+    /// Path to the output target.
+    output_path: PathBuf,
+    /// Path to the dir containing the static libwasmer library.
+    libwasmer_path: PathBuf,
+    /// The target to link the executable for.
+    target: Option<Triple>,
+}
+
+impl Default for LinkCode {
+    fn default() -> Self {
+        #[cfg(not(windows))]
+        let linker = "cc";
+        #[cfg(windows)]
+        let linker = "clang";
+        Self {
+            linker_path: PathBuf::from(linker),
+            optimization_flag: String::from("-O2"),
+            object_paths: vec![],
+            additional_libraries: vec![],
+            output_path: PathBuf::from("a.out"),
+            libwasmer_path: get_libwasmer_path().unwrap(),
+            target: None,
+        }
+    }
+}
+
+impl LinkCode {
+    fn run(&self) -> anyhow::Result<()> {
+        let mut command = Command::new(&self.linker_path);
+        let command = command
+            .arg(&self.optimization_flag)
+            .args(
+                self.object_paths
+                    .iter()
+                    .map(|path| path.canonicalize().unwrap()),
+            )
+            .arg(
+                &self
+                    .libwasmer_path
+                    .canonicalize()
+                    .context("Failed to find libwasmer")?,
+            );
+        let command = if let Some(target) = &self.target {
+            command.arg("-target").arg(format!("{}", target))
+        } else {
+            command
+        };
+        // Add libraries required per platform.
+        // We need userenv, sockets (Ws2_32), advapi32 for some system calls and bcrypt for random numbers.
+        #[cfg(windows)]
+        let command = command
+            .arg("-luserenv")
+            .arg("-lWs2_32")
+            .arg("-ladvapi32")
+            .arg("-lbcrypt");
+        // On unix we need dlopen-related symbols, libmath for a few things, and pthreads.
+        #[cfg(not(windows))]
+        let command = command.arg("-ldl").arg("-lm").arg("-pthread");
+        let link_against_extra_libs = self
+            .additional_libraries
+            .iter()
+            .map(|lib| format!("-l{}", lib));
+        let command = command.args(link_against_extra_libs);
+        let output = command.arg("-o").arg(&self.output_path).output()?;
+
+        if !output.status.success() {
+            bail!(
+                "linking failed with: stdout: {}\n\nstderr: {}",
+                std::str::from_utf8(&output.stdout)
+                    .expect("stdout is not utf8! need to handle arbitrary bytes"),
+                std::str::from_utf8(&output.stderr)
+                    .expect("stderr is not utf8! need to handle arbitrary bytes")
+            );
+        }
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/commands/inspect.rs b/lib/cli/src/commands/inspect.rs
new file mode 100644
index 0000000000..4858c749ac
--- /dev/null
+++ b/lib/cli/src/commands/inspect.rs
@@ -0,0 +1,74 @@
+use crate::store::StoreOptions;
+use anyhow::{Context, Result};
+use bytesize::ByteSize;
+use std::path::PathBuf;
+use structopt::StructOpt;
+use wasmer::*;
+
+#[derive(Debug, StructOpt)]
+/// The options for the `wasmer inspect` subcommand
+pub struct Inspect {
+    /// File to inspect as WebAssembly
+    #[structopt(name = "FILE", parse(from_os_str))]
+    path: PathBuf,
+
+    #[structopt(flatten)]
+    store: StoreOptions,
+}
+
+impl Inspect {
+    /// Runs logic for the `inspect` subcommand
+    pub fn execute(&self) -> Result<()> {
+        self.inner_execute()
+            .context(format!("failed to inspect `{}`", self.path.display()))
+    }
+    fn inner_execute(&self) -> Result<()> {
+        let (store, _engine_type, _compiler_type) = self.store.get_store()?;
+        let module_contents = std::fs::read(&self.path)?;
+        let module = Module::new(&store, &module_contents)?;
+        println!(
+            "Type: {}",
+            if !is_wasm(&module_contents) {
+                "wat"
+            } else {
+                "wasm"
+            }
+        );
+        println!("Size: {}", ByteSize(module_contents.len() as _));
+        println!("Imports:");
+        println!("  Functions:");
+        for f in module.imports().functions() {
+            println!("    \"{}\".\"{}\": {}", f.module(), f.name(), f.ty());
+        }
+        println!("  Memories:");
+        for f in module.imports().memories() {
+            println!("    \"{}\".\"{}\": {}", f.module(), f.name(), f.ty());
+        }
+        println!("  Tables:");
+        for f in module.imports().tables() {
+            println!("    \"{}\".\"{}\": {}", f.module(), f.name(), f.ty());
+        }
+        println!("  Globals:");
+        for f in module.imports().globals() {
+            println!("    \"{}\".\"{}\": {}", f.module(), f.name(), f.ty());
+        }
+        println!("Exports:");
+        println!("  Functions:");
+        for f in module.exports().functions() {
+            println!("    \"{}\": {}", f.name(), f.ty());
+        }
+        println!("  Memories:");
+        for f in module.exports().memories() {
+            println!("    \"{}\": {}", f.name(), f.ty());
+        }
+        println!("  Tables:");
+        for f in module.exports().tables() {
+            println!("    \"{}\": {}", f.name(), f.ty());
+        }
+        println!("  Globals:");
+        for f in module.exports().globals() {
+            println!("    \"{}\": {}", f.name(), f.ty());
+        }
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/commands/run.rs b/lib/cli/src/commands/run.rs
new file mode 100644
index 0000000000..7c04a8df1a
--- /dev/null
+++ b/lib/cli/src/commands/run.rs
@@ -0,0 +1,360 @@
+use crate::common::get_cache_dir;
+#[cfg(feature = "debug")]
+use crate::logging;
+use crate::store::{CompilerType, EngineType, StoreOptions};
+use crate::suggestions::suggest_function_exports;
+use crate::warning;
+use anyhow::{anyhow, Context, Result};
+use std::path::PathBuf;
+use std::str::FromStr;
+use wasmer::*;
+
+use structopt::StructOpt;
+
+#[derive(Debug, StructOpt, Clone, Default)]
+/// The options for the `wasmer run` subcommand
+pub struct Run {
+    /// File to run
+    #[structopt(name = "FILE", parse(from_os_str))]
+    path: PathBuf,
+
+    /// Invoke a specified function
+    #[structopt(long = "invoke", short = "i")]
+    invoke: Option<String>,
+
+    /// The command name is a string that will override the first argument passed
+    /// to the wasm program. This is used in wapm to provide nicer output in
+    /// help commands and error messages of the running wasm program
+    #[structopt(long = "command-name", hidden = true)]
+    command_name: Option<String>,
+
+    #[structopt(flatten)]
+    store: StoreOptions,
+
+    /// Enable debug output
+    #[cfg(feature = "debug")]
+    #[structopt(long = "debug", short = "d")]
+    debug: bool,
+
+    #[cfg(feature = "debug")]
+    #[structopt(short, long, parse(from_occurrences))]
+    verbose: u8,
+
+    /// Application arguments
+    #[structopt(value_name = "ARGS")]
+    args: Vec<String>,
+}
+
+impl Run {
+    /// Execute the run command
+    pub fn execute(&self) -> Result<()> {
+        #[cfg(feature = "debug")]
+        if self.debug {
+            logging::set_up_logging(self.verbose).unwrap();
+        }
+        self.inner_execute().with_context(|| {
+            format!(
+                "failed to run `{}`{}",
+                self.path.display(),
+                if CompilerType::enabled().is_empty() {
+                    " (no compilers enabled)"
+                } else {
+                    ""
+                }
+            )
+        })
+    }
+
+    fn inner_execute(&self) -> Result<()> {
+        let module = self.get_module()?;
+        let instance = Instance::new(&module, &imports! {})?;
+
+        // If this module exports an _initialize function, run that first.
+        if let Ok(initialize) = instance.exports.get_function("_initialize") {
+            initialize
+                .call(&[])
+                .with_context(|| "failed to run _initialize function")?;
+        }
+
+        // Do we want to invoke a function?
+        if let Some(ref invoke) = self.invoke {
+            let imports = imports! {};
+            let instance = Instance::new(&module, &imports)?;
+            let result = self.invoke_function(&instance, &invoke, &self.args)?;
+            println!(
+                "{}",
+                result
+                    .iter()
+                    .map(|val| val.to_string())
+                    .collect::<Vec<String>>()
+                    .join(" ")
+            );
+        } else {
+            let start: Function = self.try_find_function(&instance, "_start", &[])?;
+            let result = start.call(&[]);
+            result?;
+        }
+
+        Ok(())
+    }
+
+    fn get_module(&self) -> Result<Module> {
+        let contents = std::fs::read(self.path.clone())?;
+        #[cfg(feature = "universal")]
+        {
+            use wasmer_engine_universal::{Universal, UniversalArtifact, UniversalExecutable};
+
+            if UniversalExecutable::verify_serialized(&contents) {
+                unsafe {
+                    let executable = UniversalExecutable::archive_from_slice(&contents)?;
+                    let engine = wasmer_engine_universal::Universal::headless().engine();
+                    let artifact = engine.load(&executable);
+                    let store = Store::new(&engine);
+                    let module = Module::deserialize_from_file(&store, &self.path)?;
+                    return Ok(module);
+                }
+            }
+        }
+        let (store, engine_type, compiler_type) = self.store.get_store()?;
+        let module_result = Module::new(&store, &contents);
+
+        let mut module = module_result.with_context(|| {
+            format!(
+                "module instantiation failed (engine: {}, compiler: {})",
+                engine_type.to_string(),
+                compiler_type.to_string()
+            )
+        })?;
+        // We set the name outside the cache, to make sure we don't cache the name
+        module.set_name(&self.path.file_name().unwrap_or_default().to_string_lossy());
+
+        Ok(module)
+    }
+
+    #[cfg(feature = "cache")]
+    fn get_module_from_cache(
+        &self,
+        store: &Store,
+        contents: &[u8],
+        engine_type: &EngineType,
+        compiler_type: &CompilerType,
+    ) -> Result<Module> {
+        // We try to get it from cache, in case caching is enabled
+        // and the file length is greater than 4KB.
+        // For files smaller than 4KB caching is not worth it,
+        // as it takes space and the speedup is minimal.
+        let mut cache = self.get_cache(engine_type, compiler_type)?;
+        // Try to get the hash from the provided `--cache-key`, otherwise
+        // generate one from the provided file `.wasm` contents.
+        let hash = self
+            .cache_key
+            .as_ref()
+            .and_then(|key| Hash::from_str(&key).ok())
+            .unwrap_or_else(|| Hash::generate(&contents));
+        match unsafe { cache.load(&store, hash) } {
+            Ok(module) => Ok(module),
+            Err(e) => {
+                match e {
+                    DeserializeError::Io(_) => {
+                        // Do not notify on IO errors
+                    }
+                    err => {
+                        warning!("cached module is corrupted: {}", err);
+                    }
+                }
+                let module = Module::new(&store, &contents)?;
+                // Store the compiled Module in cache
+                cache.store(hash, &module)?;
+                Ok(module)
+            }
+        }
+    }
+
+    #[cfg(feature = "cache")]
+    /// Get the Compiler Filesystem cache
+    fn get_cache(
+        &self,
+        engine_type: &EngineType,
+        compiler_type: &CompilerType,
+    ) -> Result<FileSystemCache> {
+        let mut cache_dir_root = get_cache_dir();
+        cache_dir_root.push(compiler_type.to_string());
+        let mut cache = FileSystemCache::new(cache_dir_root)?;
+
+        // Important: Dylib files need to have a `.dll` extension on
+        // Windows, otherwise they will not load, so we just add an
+        // extension always to make it easier to recognize as well.
+        #[allow(unreachable_patterns)]
+        let extension = match *engine_type {
+            #[cfg(feature = "dylib")]
+            EngineType::Dylib => {
+                wasmer_engine_dylib::DylibArtifact::get_default_extension(&Triple::host())
+                    .to_string()
+            }
+            #[cfg(feature = "universal")]
+            EngineType::Universal => {
+                wasmer_engine_universal::UniversalArtifact::get_default_extension(&Triple::host())
+                    .to_string()
+            }
+            // We use the compiler type as the default extension
+            _ => compiler_type.to_string(),
+        };
+        cache.set_cache_extension(Some(extension));
+        Ok(cache)
+    }
+
+    fn try_find_function(
+        &self,
+        instance: &Instance,
+        name: &str,
+        args: &[String],
+    ) -> Result<Function> {
+        Ok(instance
+            .exports
+            .get_function(&name)
+            .map_err(|e| {
+                if instance.module().info().functions.is_empty() {
+                    anyhow!("The module has no exported functions to call.")
+                } else {
+                    let suggested_functions = suggest_function_exports(instance.module(), "");
+                    let names = suggested_functions
+                        .iter()
+                        .take(3)
+                        .map(|arg| format!("`{}`", arg))
+                        .collect::<Vec<String>>()
+                        .join(", ");
+                    let suggested_command = format!(
+                        "wasmer {} -i {} {}",
+                        self.path.display(),
+                        suggested_functions.get(0).unwrap_or(&String::new()),
+                        args.join(" ")
+                    );
+                    let suggestion = if suggested_functions.is_empty() {
+                        String::from("Cannot find any exported functions.")
+                    } else {
+                        format!(
+                            "Similar functions found: {}.\nTry with: {}",
+                            names, suggested_command
+                        )
+                    };
+                    match e {
+                        ExportError::Missing(_) => {
+                            anyhow!("No export `{}` found in the module.\n{}", name, suggestion)
+                        }
+                        ExportError::IncompatibleType => anyhow!(
+                            "Export `{}` found, but is not a function.\n{}",
+                            name,
+                            suggestion
+                        ),
+                    }
+                }
+            })?
+            .clone())
+    }
+
+    fn invoke_function(
+        &self,
+        instance: &Instance,
+        invoke: &str,
+        args: &[String],
+    ) -> Result<Vec<Val>> {
+        let func: Function = self.try_find_function(&instance, invoke, args)?;
+        let func_ty = func.ty();
+        let required_arguments = func_ty.params().len();
+        let provided_arguments = args.len();
+        if required_arguments != provided_arguments {
+            bail!(
+                "Function expected {} arguments, but received {}: \"{}\"",
+                required_arguments,
+                provided_arguments,
+                self.args.join(" ")
+            );
+        }
+        let invoke_args = args
+            .iter()
+            .zip(func_ty.params().iter())
+            .map(|(arg, param_type)| match param_type {
+                ValType::I32 => {
+                    Ok(Val::I32(arg.parse().map_err(|_| {
+                        anyhow!("Can't convert `{}` into a i32", arg)
+                    })?))
+                }
+                ValType::I64 => {
+                    Ok(Val::I64(arg.parse().map_err(|_| {
+                        anyhow!("Can't convert `{}` into a i64", arg)
+                    })?))
+                }
+                ValType::F32 => {
+                    Ok(Val::F32(arg.parse().map_err(|_| {
+                        anyhow!("Can't convert `{}` into a f32", arg)
+                    })?))
+                }
+                ValType::F64 => {
+                    Ok(Val::F64(arg.parse().map_err(|_| {
+                        anyhow!("Can't convert `{}` into a f64", arg)
+                    })?))
+                }
+                _ => Err(anyhow!(
+                    "Don't know how to convert {} into {:?}",
+                    arg,
+                    param_type
+                )),
+            })
+            .collect::<Result<Vec<Val>>>()?;
+        Ok(func.call(&invoke_args)?)
+    }
+
+    /// Create Run instance for arguments/env,
+    /// assuming we're being run from a CFP binfmt interpreter.
+    pub fn from_binfmt_args() -> Run {
+        Self::from_binfmt_args_fallible().unwrap_or_else(|e| {
+            crate::error::PrettyError::report::<()>(
+                Err(e).context("Failed to set up wasmer binfmt invocation"),
+            )
+        })
+    }
+
+    #[cfg(target_os = "linux")]
+    fn from_binfmt_args_fallible() -> Result<Run> {
+        let argv = std::env::args_os().collect::<Vec<_>>();
+        let (_interpreter, executable, original_executable, args) = match &argv[..] {
+            [a, b, c, d @ ..] => (a, b, c, d),
+            _ => {
+                bail!("Wasmer binfmt interpreter needs at least three arguments (including $0) - must be registered as binfmt interpreter with the CFP flags. (Got arguments: {:?})", argv);
+            }
+        };
+        // TODO: Optimally, args and env would be passed as a UTF-8 Vec.
+        // (Can be pulled out of std::os::unix::ffi::OsStrExt)
+        // But I don't want to duplicate or rewrite run.rs today.
+        let args = args
+            .iter()
+            .enumerate()
+            .map(|(i, s)| {
+                s.clone().into_string().map_err(|s| {
+                    anyhow!(
+                        "Cannot convert argument {} ({:?}) to UTF-8 string",
+                        i + 1,
+                        s
+                    )
+                })
+            })
+            .collect::<Result<Vec<String>>>()?;
+        let original_executable = original_executable
+            .clone()
+            .into_string()
+            .map_err(|s| anyhow!("Cannot convert executable name {:?} to UTF-8 string", s))?;
+        let store = StoreOptions::default();
+        // TODO: store.compiler.features.all = true; ?
+        Ok(Self {
+            args,
+            path: executable.into(),
+            command_name: Some(original_executable),
+            store,
+            ..Self::default()
+        })
+    }
+    #[cfg(not(target_os = "linux"))]
+    fn from_binfmt_args_fallible() -> Result<Run> {
+        bail!("binfmt_misc is only available on linux.")
+    }
+}
diff --git a/lib/cli/src/commands/self_update.rs b/lib/cli/src/commands/self_update.rs
new file mode 100644
index 0000000000..2218107c3f
--- /dev/null
+++ b/lib/cli/src/commands/self_update.rs
@@ -0,0 +1,39 @@
+//! When `wasmer self-update` is executed, this is what gets executed.
+//! What gets executed when `wasmer self-update` is run.
+use anyhow::{Context, Result};
+#[cfg(not(target_os = "windows"))]
+use std::process::{Command, Stdio};
+use structopt::StructOpt;
+
+/// The options for the `wasmer self-update` subcommand
+#[derive(Debug, StructOpt)]
+pub struct SelfUpdate {}
+
+impl SelfUpdate {
+    /// Runs logic for the `self-update` subcommand
+    pub fn execute(&self) -> Result<()> {
+        self.inner_execute().context("failed to self-update wasmer")
+    }
+
+    #[cfg(not(target_os = "windows"))]
+    fn inner_execute(&self) -> Result<()> {
+        println!("Fetching latest installer");
+        let cmd = Command::new("curl")
+            .arg("https://get.wasmer.io")
+            .arg("-sSfL")
+            .stdout(Stdio::piped())
+            .spawn()?;
+
+        let mut process = Command::new("sh")
+            .stdin(cmd.stdout.expect("failed to capture curl stdout"))
+            .stdout(Stdio::inherit())
+            .spawn()?;
+
+        process.wait()?;
+        Ok(())
+    }
+
+    #[cfg(target_os = "windows")]
+    fn inner_execute(&self) -> Result<()> {
+        bail!("Self update is not supported on Windows. Use install instructions on the Wasmer homepage: https://wasmer.io");
+    }
+}
diff --git a/lib/cli/src/commands/validate.rs b/lib/cli/src/commands/validate.rs
new file mode 100644
index 0000000000..fb9554d684
--- /dev/null
+++ b/lib/cli/src/commands/validate.rs
@@ -0,0 +1,34 @@
+use crate::store::StoreOptions;
+use anyhow::{bail, Context, Result};
+use std::path::PathBuf;
+use structopt::StructOpt;
+use wasmer::*;
+
+#[derive(Debug, StructOpt)]
+/// The options for the `wasmer validate` subcommand
+pub struct Validate {
+    /// File to validate as WebAssembly
+    #[structopt(name = "FILE", parse(from_os_str))]
+    path: PathBuf,
+
+    #[structopt(flatten)]
+    store: StoreOptions,
+}
+
+impl Validate {
+    /// Runs logic for the `validate` subcommand
+    pub fn execute(&self) -> Result<()> {
+        self.inner_execute()
+            .context(format!("failed to validate `{}`", self.path.display()))
+    }
+    fn inner_execute(&self) -> Result<()> {
+        let (store, _engine_type, _compiler_type) = self.store.get_store()?;
+        let module_contents = std::fs::read(&self.path)?;
+        if !is_wasm(&module_contents) {
+            bail!("`wasmer validate` only validates WebAssembly files");
+        }
+        Module::validate(&store, &module_contents)?;
+        eprintln!("Validation passed for `{}`.", self.path.display());
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/commands/wasmer_create_exe_main.c b/lib/cli/src/commands/wasmer_create_exe_main.c
new file mode 100644
index 0000000000..c77fcab9dd
--- /dev/null
+++ b/lib/cli/src/commands/wasmer_create_exe_main.c
@@ -0,0 +1,169 @@
+#include "wasmer.h"
+#include "my_wasm.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define own
+
+// TODO: make this define templated so that the Rust code can toggle it on/off
+#define WASI
+
+static void print_wasmer_error() {
+  int error_len = wasmer_last_error_length();
+  printf("Error len: `%d`\n", error_len);
+  char *error_str = (char *)malloc(error_len);
+  wasmer_last_error_message(error_str, error_len);
+  printf("%s\n", error_str);
+  free(error_str);
+}
+
+#ifdef WASI
+static void pass_mapdir_arg(wasi_config_t *wasi_config, char *mapdir) {
+  char *colon = strchr(mapdir, ':');
+  if (colon == NULL || colon == mapdir) {
+    // malformed argument: no colon at all, or an empty alias
+    fprintf(stderr, "Expected mapdir argument of the form alias:directory\n");
+    exit(-1);
+  }
+  int colon_location = colon - mapdir;
+
+  char *alias = (char *)malloc(colon_location + 1);
+  memcpy(alias, mapdir, colon_location);
+  alias[colon_location] = '\0';
+
+  int dir_len = strlen(mapdir) - colon_location;
+  char *dir = (char *)malloc(dir_len + 1);
+  memcpy(dir, &mapdir[colon_location + 1],
+         dir_len);
+  dir[dir_len] = '\0';
+
+  wasi_config_mapdir(wasi_config, alias, dir);
+  free(alias);
+  free(dir);
+}
+
+// We try to parse out `--dir` and `--mapdir` ahead of time and process those
+// specially. All other arguments are passed to the guest program.
+static void handle_arguments(wasi_config_t *wasi_config, int argc,
+                             char *argv[]) {
+  for (int i = 1; i < argc; ++i) {
+    // We probably want special args like `--dir` and `--mapdir` to not be
+    // passed directly
+    if (strcmp(argv[i], "--dir") == 0) {
+      // next arg is a preopen directory
+      if ((i + 1) < argc) {
+        i++;
+        wasi_config_preopen_dir(wasi_config, argv[i]);
+      } else {
+        fprintf(stderr, "--dir expects a following argument specifying which "
+                        "directory to preopen\n");
+        exit(-1);
+      }
+    } else if (strcmp(argv[i], "--mapdir") == 0) {
+      // next arg is a mapdir
+      if ((i + 1) < argc) {
+        i++;
+        pass_mapdir_arg(wasi_config, argv[i]);
+      } else {
+        fprintf(stderr,
+                "--mapdir expects a following argument specifying which "
+                "directory to preopen in the form alias:directory\n");
+        exit(-1);
+      }
+    } else if (strncmp(argv[i], "--dir=", strlen("--dir=")) == 0) {
+      // this arg is a preopen dir
+      char *dir = argv[i] + strlen("--dir=");
+      wasi_config_preopen_dir(wasi_config, dir);
+    } else if (strncmp(argv[i], "--mapdir=", strlen("--mapdir=")) == 0) {
+      // this arg is a mapdir
+      char *mapdir = argv[i] + strlen("--mapdir=");
+      pass_mapdir_arg(wasi_config, mapdir);
+    } else {
+      // guest argument
+      wasi_config_arg(wasi_config, argv[i]);
+    }
+  }
+}
+#endif
+
+int main(int argc, char *argv[]) {
+  wasm_config_t *config = wasm_config_new();
+  wasm_config_set_engine(config, STATICLIB);
+  wasm_engine_t *engine = wasm_engine_new_with_config(config);
+  wasm_store_t *store = wasm_store_new(engine);
+
+  wasm_module_t *module = wasmer_staticlib_engine_new(store, argv[0]);
+
+  if (!module) {
+    fprintf(stderr, "Failed to create module\n");
+    print_wasmer_error();
+    return -1;
+  }
+
+  // We have now finished the memory buffer bookkeeping and we have a valid
+  // Module.
+
+#ifdef WASI
+  wasi_config_t *wasi_config = wasi_config_new(argv[0]);
+  handle_arguments(wasi_config, argc, argv);
+
+  wasi_env_t *wasi_env = wasi_env_new(wasi_config);
+  if (!wasi_env) {
+    fprintf(stderr, "Error building WASI env!\n");
+    print_wasmer_error();
+    return 1;
+  }
+#endif
+
+  wasm_importtype_vec_t import_types;
+  wasm_module_imports(module, &import_types);
+
+  wasm_extern_vec_t imports;
+  wasm_extern_vec_new_uninitialized(&imports, import_types.size);
+  wasm_importtype_vec_delete(&import_types);
+
+#ifdef WASI
+  bool get_imports_result = wasi_get_imports(store, module, wasi_env, &imports);
+  wasi_env_delete(wasi_env);
+
+  if (!get_imports_result) {
+    fprintf(stderr, "Error getting WASI imports!\n");
+    print_wasmer_error();
+
+    return 1;
+  }
+#endif
+
+  wasm_instance_t *instance = wasm_instance_new(store, module, &imports, NULL);
+
+  if (!instance) {
+    fprintf(stderr, "Failed to create instance\n");
+    print_wasmer_error();
+    return -1;
+  }
+
+#ifdef WASI
+  own wasm_func_t *start_function = wasi_get_start_function(instance);
+  if (!start_function) {
+    fprintf(stderr, "`_start` function not found\n");
+    print_wasmer_error();
+    return -1;
+  }
+
+  wasm_val_vec_t args = WASM_EMPTY_VEC;
+  wasm_val_vec_t results = WASM_EMPTY_VEC;
+  own wasm_trap_t *trap = wasm_func_call(start_function, &args, &results);
+  if (trap) {
+    fprintf(stderr, "Trap is not NULL: TODO:\n");
+    return -1;
+  }
+#endif
+
+  // TODO: handle non-WASI start (maybe with invoke?)
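+  // Tear down in reverse order of creation: instance, module, store, engine.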
+
+  wasm_instance_delete(instance);
+  wasm_module_delete(module);
+  wasm_store_delete(store);
+  wasm_engine_delete(engine);
+  return 0;
+}
diff --git a/lib/cli/src/commands/wast.rs b/lib/cli/src/commands/wast.rs
new file mode 100644
index 0000000000..2837069f21
--- /dev/null
+++ b/lib/cli/src/commands/wast.rs
@@ -0,0 +1,37 @@
+//! Runs a .wast WebAssembly test suite
+use crate::store::StoreOptions;
+use anyhow::{Context, Result};
+use std::path::PathBuf;
+use structopt::StructOpt;
+use wasmer_wast::Wast as WastSpectest;
+
+#[derive(Debug, StructOpt)]
+/// The options for the `wasmer wast` subcommand
+pub struct Wast {
+    /// Wast file to run
+    #[structopt(name = "FILE", parse(from_os_str))]
+    path: PathBuf,
+
+    #[structopt(flatten)]
+    store: StoreOptions,
+
+    #[structopt(short, long)]
+    /// Whether the wast run should stop at the first error or continue.
+    fail_fast: bool,
+}
+
+impl Wast {
+    /// Runs logic for the `wast` subcommand
+    pub fn execute(&self) -> Result<()> {
+        self.inner_execute()
+            .context(format!("failed to test the wast `{}`", self.path.display()))
+    }
+    fn inner_execute(&self) -> Result<()> {
+        let (store, _engine_name, _compiler_name) = self.store.get_store()?;
+        let mut wast = WastSpectest::new_with_spectest(store);
+        wast.fail_fast = self.fail_fast;
+        wast.run_file(&self.path).with_context(|| "tests failed")?;
+        eprintln!("Wast tests succeeded for `{}`.", self.path.display());
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/common.rs b/lib/cli/src/common.rs
new file mode 100644
index 0000000000..863e154103
--- /dev/null
+++ b/lib/cli/src/common.rs
@@ -0,0 +1,53 @@
+//! Common module with commonly used structures across different
+//! commands.
+use crate::VERSION;
+use std::env;
+use std::path::PathBuf;
+use structopt::StructOpt;
+
+#[derive(Debug, StructOpt, Clone, Default)]
+/// The WebAssembly features that can be passed through the
+/// Command Line args.
+pub struct WasmFeatures {
+    /// Enable support for the SIMD proposal.
+    #[structopt(long = "enable-simd")]
+    pub simd: bool,
+
+    /// Enable support for the threads proposal.
+    #[structopt(long = "enable-threads")]
+    pub threads: bool,
+
+    /// Enable support for the reference types proposal.
+    #[structopt(long = "enable-reference-types")]
+    pub reference_types: bool,
+
+    /// Enable support for the multi value proposal.
+    #[structopt(long = "enable-multi-value")]
+    pub multi_value: bool,
+
+    /// Enable support for the bulk memory proposal.
+    #[structopt(long = "enable-bulk-memory")]
+    pub bulk_memory: bool,
+
+    /// Enable support for all pre-standard proposals.
+    #[structopt(long = "enable-all")]
+    pub all: bool,
+}
+
+/// Get the cache dir
+pub fn get_cache_dir() -> PathBuf {
+    match env::var("WASMER_CACHE_DIR") {
+        Ok(dir) => {
+            let mut path = PathBuf::from(dir);
+            path.push(VERSION);
+            path
+        }
+        Err(_) => {
+            // We use a temporary directory for saving cache files
+            let mut temp_dir = env::temp_dir();
+            temp_dir.push("wasmer");
+            temp_dir.push(VERSION);
+            temp_dir
+        }
+    }
+}
diff --git a/lib/cli/src/compilers/llvm.rs b/lib/cli/src/compilers/llvm.rs
new file mode 100644
index 0000000000..b3c8c8fe6f
--- /dev/null
+++ b/lib/cli/src/compilers/llvm.rs
@@ -0,0 +1,40 @@
+use std::fs;
+use std::io::Write;
+use std::path::PathBuf;
+use structopt::StructOpt;
+use wasmer_compiler_llvm::{InkwellMemoryBuffer, InkwellModule, LLVMCallbacks};
+
+#[derive(Debug, StructOpt, Clone)]
+/// LLVM backend flags.
+pub struct LLVMCLIOptions {
+    /// Emit LLVM IR before optimization pipeline.
+    #[structopt(long = "llvm-pre-opt-ir", parse(from_os_str))]
+    pre_opt_ir: Option<PathBuf>,
+
+    /// Emit LLVM IR after optimization pipeline.
+ #[structopt(long = "llvm-post-opt-ir", parse(from_os_str))] + post_opt_ir: Option, + + /// Emit LLVM generated native code object file. + #[structopt(long = "llvm-object-file", parse(from_os_str))] + obj_file: Option, +} + +impl LLVMCallbacks for LLVMCLIOptions { + fn preopt_ir_callback(&mut self, module: &InkwellModule) { + if let Some(filename) = &self.pre_opt_ir { + module.print_to_file(filename).unwrap(); + } + } + + fn postopt_ir_callback(&mut self, module: &InkwellModule) { + if let Some(filename) = &self.post_opt_ir { + module.print_to_file(filename).unwrap(); + } + } + + fn obj_memory_buffer_callback(&mut self, memory_buffer: &InkwellMemoryBuffer) { + if let Some(filename) = &self.obj_file { + let mem_buf_slice = memory_buffer.as_slice(); + let mut file = fs::File::create(filename).unwrap(); + let mut pos = 0; + while pos < mem_buf_slice.len() { + pos += file.write(&mem_buf_slice[pos..]).unwrap(); + } + } + } +} diff --git a/lib/cli/src/error.rs b/lib/cli/src/error.rs new file mode 100644 index 0000000000..f23b50339c --- /dev/null +++ b/lib/cli/src/error.rs @@ -0,0 +1,114 @@ +//! Implements `PretyError` to print pretty errors in the CLI (when they happen) + +use anyhow::{Chain, Error}; +use colored::*; +use std::fmt::{self, Debug, Write}; + +/// A `PrettyError` for printing `anyhow::Error` nicely. +pub struct PrettyError { + error: Error, +} + +/// A macro that prints a warning with nice colors +#[macro_export] +macro_rules! warning { + ($($arg:tt)*) => ({ + use colored::*; + eprintln!("{}: {}", "warning".yellow().bold(), format!($($arg)*)); + }) +} + +impl PrettyError { + /// Process a `Result` printing any errors and exiting + /// the process after + pub fn report(result: Result) -> ! { + std::process::exit(match result { + Ok(_t) => 0, + Err(error) => { + eprintln!("{:?}", PrettyError { error }); + 1 + } + }); + } +} + +impl Debug for PrettyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let error = &self.error; + + if f.alternate() { + return Debug::fmt(&error, f); + } + + write!(f, "{}", format!("{}: {}", "error".red(), error).bold())?; + // write!(f, "{}", error)?; + + if let Some(cause) = error.source() { + // write!(f, "\n{}:", "caused by".bold().blue())?; + let chain = Chain::new(cause); + let (total_errors, _) = chain.size_hint(); + for (n, error) in chain.enumerate() { + writeln!(f)?; + let mut indented = Indented { + inner: f, + number: Some(n + 1), + is_last: n == total_errors - 1, + started: false, + }; + write!(indented, "{}", error)?; + } + } + Ok(()) + } +} + +struct Indented<'a, D> { + inner: &'a mut D, + number: Option, + started: bool, + is_last: bool, +} + +impl Write for Indented<'_, T> +where + T: Write, +{ + fn write_str(&mut self, s: &str) -> fmt::Result { + for (i, line) in s.split('\n').enumerate() { + if !self.started { + self.started = true; + match self.number { + Some(number) => { + if !self.is_last { + write!( + self.inner, + "{} {: >4} ", + "│".bold().blue(), + format!("{}:", number).dimmed() + )? + } else { + write!( + self.inner, + "{}{: >2}: ", + "╰─▶".bold().blue(), + format!("{}", number).bold().blue() + )? 
+                        }
+                    }
+                    None => self.inner.write_str(" ")?,
+                }
+            } else if i > 0 {
+                self.inner.write_char('\n')?;
+                if self.number.is_some() {
+                    self.inner.write_str(" ")?;
+                } else {
+                    self.inner.write_str(" ")?;
+                }
+            }
+
+            self.inner.write_str(line)?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/lib/cli/src/lib.rs b/lib/cli/src/lib.rs
new file mode 100644
index 0000000000..7a80b9fae6
--- /dev/null
+++ b/lib/cli/src/lib.rs
@@ -0,0 +1,32 @@
+//! The Wasmer binary lib
+
+#![deny(
+    missing_docs,
+    dead_code,
+    nonstandard_style,
+    unused_mut,
+    unused_variables,
+    unused_unsafe,
+    unreachable_patterns,
+    unstable_features
+)]
+#![doc(html_favicon_url = "https://wasmer.io/images/icons/favicon-32x32.png")]
+#![doc(html_logo_url = "https://github.com/wasmerio.png?size=200")]
+
+#[macro_use]
+extern crate anyhow;
+
+pub mod commands;
+pub mod common;
+#[macro_use]
+pub mod error;
+pub mod c_gen;
+pub mod cli;
+#[cfg(feature = "debug")]
+pub mod logging;
+pub mod store;
+pub mod suggestions;
+pub mod utils;
+
+/// Version number of this crate.
+pub const VERSION: &str = env!("CARGO_PKG_VERSION");
diff --git a/lib/cli/src/logging.rs b/lib/cli/src/logging.rs
new file mode 100644
index 0000000000..d60a45fbf3
--- /dev/null
+++ b/lib/cli/src/logging.rs
@@ -0,0 +1,68 @@
+//! Logging functions for the debug feature.
+use crate::utils::wasmer_should_print_color;
+use anyhow::Result;
+use fern::colors::{Color, ColoredLevelConfig};
+use std::time;
+
+/// The debug level
+pub type DebugLevel = log::LevelFilter;
+
+/// Subroutine to instantiate the loggers
+pub fn set_up_logging(verbose: u8) -> Result<(), String> {
+    let colors_line = ColoredLevelConfig::new()
+        .error(Color::Red)
+        .warn(Color::Yellow)
+        .trace(Color::BrightBlack);
+    let should_color = wasmer_should_print_color();
+
+    let colors_level = colors_line.info(Color::Green);
+    let level = match verbose {
+        1 => DebugLevel::Debug,
+        _ => DebugLevel::Trace,
+    };
+    let dispatch = fern::Dispatch::new()
+        .level(level)
+        .chain({
+            let base = if should_color {
+                fern::Dispatch::new().format(move |out, message, record| {
+                    let time = time::SystemTime::now()
+                        .duration_since(time::UNIX_EPOCH)
+                        .expect("Can't get time");
+                    out.finish(format_args!(
+                        "{color_line}[{seconds}.{millis} {level} {target}{color_line}]{ansi_close} {message}",
+                        color_line = format_args!(
+                            "\x1B[{}m",
+                            colors_line.get_color(&record.level()).to_fg_str()
+                        ),
+                        seconds = time.as_secs(),
+                        millis = time.subsec_millis(),
+                        level = colors_level.color(record.level()),
+                        target = record.target(),
+                        ansi_close = "\x1B[0m",
+                        message = message,
+                    ));
+                })
+            } else {
+                // default formatter without color
+                fern::Dispatch::new().format(move |out, message, record| {
+                    let time = time::SystemTime::now()
+                        .duration_since(time::UNIX_EPOCH)
+                        .expect("Can't get time");
+                    out.finish(format_args!(
+                        "[{seconds}.{millis} {level} {target}] {message}",
+                        seconds = time.as_secs(),
+                        millis = time.subsec_millis(),
+                        level = record.level(),
+                        target = record.target(),
+                        message = message,
+                    ));
+                })
+            };
+
+            base.filter(|metadata| metadata.target().starts_with("wasmer"))
+                .chain(std::io::stdout())
+        });
+
+    dispatch.apply().map_err(|e| format!("{}", e))?;
+
+    Ok(())
+}
diff --git a/lib/cli/src/store.rs b/lib/cli/src/store.rs
new file mode 100644
index 0000000000..c53770aa2d
--- /dev/null
+++ b/lib/cli/src/store.rs
@@ -0,0 +1,488 @@
+//! Common module with commonly used structures across different
+//! commands.
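+//!
+//! A minimal sketch of how the commands consume these options (illustrative
+//! only; `get_store` picks sensible defaults when no engine or compiler
+//! flags are given):
+//!
+//! ```ignore
+//! let opts = StoreOptions::default();
+//! let (store, engine_type, compiler_type) = opts.get_store()?;
+//! ```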
+
+use crate::common::WasmFeatures;
+use anyhow::Result;
+#[cfg(feature = "llvm")]
+use std::path::PathBuf;
+use std::string::ToString;
+#[allow(unused_imports)]
+use std::sync::Arc;
+use structopt::StructOpt;
+use wasmer::*;
+#[cfg(feature = "compiler")]
+use wasmer_compiler::CompilerConfig;
+
+#[derive(Debug, Clone, StructOpt, Default)]
+/// The compiler and engine options
+pub struct StoreOptions {
+    #[cfg(feature = "compiler")]
+    #[structopt(flatten)]
+    compiler: CompilerOptions,
+
+    /// Use the Universal Engine.
+    #[structopt(long, conflicts_with_all = &["dylib", "staticlib", "jit", "native", "object_file"])]
+    universal: bool,
+
+    /// Use the Dylib Engine.
+    #[structopt(long, conflicts_with_all = &["universal", "staticlib", "jit", "native", "object_file"])]
+    dylib: bool,
+
+    /// Use the Staticlib Engine.
+    #[structopt(long, conflicts_with_all = &["universal", "dylib", "jit", "native", "object_file"])]
+    staticlib: bool,
+
+    /// Use the JIT (Universal) Engine.
+    #[structopt(long, hidden = true, conflicts_with_all = &["universal", "dylib", "staticlib", "native", "object_file"])]
+    jit: bool,
+
+    /// Use the Native (Dylib) Engine.
+    #[structopt(long, hidden = true, conflicts_with_all = &["universal", "dylib", "staticlib", "jit", "object_file"])]
+    native: bool,
+
+    /// Use the ObjectFile (Staticlib) Engine.
+    #[structopt(long, hidden = true, conflicts_with_all = &["universal", "dylib", "staticlib", "jit", "native"])]
+    object_file: bool,
+}
+
+#[cfg(feature = "compiler")]
+#[derive(Debug, Clone, StructOpt, Default)]
+/// The compiler options
+pub struct CompilerOptions {
+    /// Use Singlepass compiler.
+    #[structopt(long, conflicts_with_all = &["cranelift", "llvm"])]
+    singlepass: bool,
+
+    /// Use Cranelift compiler.
+    #[structopt(long, conflicts_with_all = &["singlepass", "llvm"])]
+    cranelift: bool,
+
+    /// Use LLVM compiler.
+    #[structopt(long, conflicts_with_all = &["singlepass", "cranelift"])]
+    llvm: bool,
+
+    /// Enable compiler internal verification.
+    #[structopt(long)]
+    enable_verifier: bool,
+
+    /// LLVM debug directory, where IR and object files will be written to.
+    #[cfg(feature = "llvm")]
+    #[structopt(long, parse(from_os_str))]
+    llvm_debug_dir: Option<PathBuf>,
+
+    #[structopt(flatten)]
+    features: WasmFeatures,
+}
+
+#[cfg(feature = "compiler")]
+impl CompilerOptions {
+    // depending on compiler flags some branches may end up the same
+    #[allow(clippy::if_same_then_else)]
+    fn get_compiler(&self) -> Result<CompilerType> {
+        if self.cranelift {
+            Ok(CompilerType::Cranelift)
+        } else if self.llvm {
+            Ok(CompilerType::LLVM)
+        } else if self.singlepass {
+            Ok(CompilerType::Singlepass)
+        } else {
+            // Auto mode: choose the best compiler for this platform
+            cfg_if::cfg_if! {
+                if #[cfg(all(feature = "cranelift", any(target_arch = "x86_64", target_arch = "aarch64")))] {
+                    Ok(CompilerType::Cranelift)
+                }
+                else if #[cfg(all(feature = "singlepass", target_arch = "x86_64"))] {
+                    Ok(CompilerType::Singlepass)
+                }
+                else if #[cfg(feature = "llvm")] {
+                    Ok(CompilerType::LLVM)
+                } else {
+                    bail!("There are no available compilers for your architecture");
+                }
+            }
+        }
+    }
+
+    /// Get the enabled Wasm features.
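+    /// Each `--enable-*` flag (or `--enable-all`) switches the corresponding
+    /// proposal on in the baseline `Features` passed in; flags never switch
+    /// a feature off.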
+    pub fn get_features(&self, mut features: Features) -> Result<Features> {
+        if self.features.threads || self.features.all {
+            features.threads(true);
+        }
+        if self.features.multi_value || self.features.all {
+            features.multi_value(true);
+        }
+        if self.features.simd || self.features.all {
+            features.simd(true);
+        }
+        if self.features.bulk_memory || self.features.all {
+            features.bulk_memory(true);
+        }
+        if self.features.reference_types || self.features.all {
+            features.reference_types(true);
+        }
+        Ok(features)
+    }
+
+    /// Gets the Store for a given target and engine.
+    pub fn get_store_for_target_and_engine(
+        &self,
+        target: Target,
+        engine_type: EngineType,
+    ) -> Result<(Store, CompilerType)> {
+        let (compiler_config, compiler_type) = self.get_compiler_config()?;
+        let engine = self.get_engine_by_type(target, compiler_config, engine_type)?;
+        let store = Store::new(&*engine);
+        Ok((store, compiler_type))
+    }
+
+    fn get_engine_by_type(
+        &self,
+        target: Target,
+        compiler_config: Box<dyn CompilerConfig>,
+        engine_type: EngineType,
+    ) -> Result<Box<dyn Engine + Send + Sync>> {
+        let features = self.get_features(compiler_config.default_features_for_target(&target))?;
+        let engine: Box<dyn Engine + Send + Sync> = match engine_type {
+            #[cfg(feature = "universal")]
+            EngineType::Universal => Box::new(
+                wasmer_engine_universal::Universal::new(compiler_config)
+                    .features(features)
+                    .target(target)
+                    .engine(),
+            ),
+            #[cfg(feature = "dylib")]
+            EngineType::Dylib => Box::new(
+                wasmer_engine_dylib::Dylib::new(compiler_config)
+                    .target(target)
+                    .features(features)
+                    .engine(),
+            ),
+            #[cfg(feature = "staticlib")]
+            EngineType::Staticlib => Box::new(
+                wasmer_engine_staticlib::Staticlib::new(compiler_config)
+                    .target(target)
+                    .features(features)
+                    .engine(),
+            ),
+            #[cfg(not(all(feature = "universal", feature = "dylib", feature = "staticlib")))]
+            engine => bail!(
+                "The `{}` engine is not included in this binary.",
+                engine.to_string()
+            ),
+        };
+
+        Ok(engine)
+    }
+
+    /// Get the Compiler Config for the current options
+    #[allow(unused_variables)]
+    pub(crate) fn get_compiler_config(&self) -> Result<(Box<dyn CompilerConfig>, CompilerType)> {
+        let compiler = self.get_compiler()?;
+        let compiler_config: Box<dyn CompilerConfig> = match compiler {
+            CompilerType::Headless => bail!("The headless engine can't be chosen"),
+            #[cfg(feature = "singlepass")]
+            CompilerType::Singlepass => {
+                let mut config = wasmer_compiler_singlepass::Singlepass::new();
+                if self.enable_verifier {
+                    config.enable_verifier();
+                }
+                Box::new(config)
+            }
+            #[cfg(feature = "cranelift")]
+            CompilerType::Cranelift => {
+                let mut config = wasmer_compiler_cranelift::Cranelift::new();
+                if self.enable_verifier {
+                    config.enable_verifier();
+                }
+                Box::new(config)
+            }
+            #[cfg(feature = "llvm")]
+            CompilerType::LLVM => {
+                use std::fmt;
+                use std::fs::File;
+                use std::io::Write;
+                use wasmer_compiler_llvm::{
+                    CompiledKind, InkwellMemoryBuffer, InkwellModule, LLVMCallbacks, LLVM,
+                };
+                use wasmer_types::entity::EntityRef;
+                let mut config = LLVM::new();
+                struct Callbacks {
+                    debug_dir: PathBuf,
+                }
+                impl Callbacks {
+                    fn new(debug_dir: PathBuf) -> Result<Self> {
+                        // Create the debug dir in case it doesn't exist
+                        std::fs::create_dir_all(&debug_dir)?;
+                        Ok(Self { debug_dir })
+                    }
+                }
+                // Converts a list of wasm types into a compact signature string,
+                // used below when building dump filenames.
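+                // For example, a `(i32, f64) -> i64` function type maps to "iF"
+                // for the params and "I" for the results, so its call trampoline
+                // is dumped to files named `trampoline_call_iF_I.*`.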
+                fn types_to_signature(types: &[Type]) -> String {
+                    types
+                        .iter()
+                        .map(|ty| match ty {
+                            Type::I32 => "i".to_string(),
+                            Type::I64 => "I".to_string(),
+                            Type::F32 => "f".to_string(),
+                            Type::F64 => "F".to_string(),
+                            Type::V128 => "v".to_string(),
+                            Type::ExternRef => "e".to_string(),
+                            Type::FuncRef => "r".to_string(),
+                        })
+                        .collect::<Vec<String>>()
+                        .join("")
+                }
+                // Converts a kind into a filename that we will use to dump
+                // the contents of the IR object file to.
+                fn function_kind_to_filename(kind: &CompiledKind) -> String {
+                    match kind {
+                        CompiledKind::Local(local_index) => {
+                            format!("function_{}", local_index.index())
+                        }
+                        CompiledKind::FunctionCallTrampoline(func_type) => format!(
+                            "trampoline_call_{}_{}",
+                            types_to_signature(&func_type.params()),
+                            types_to_signature(&func_type.results())
+                        ),
+                        CompiledKind::DynamicFunctionTrampoline(func_type) => format!(
+                            "trampoline_dynamic_{}_{}",
+                            types_to_signature(&func_type.params()),
+                            types_to_signature(&func_type.results())
+                        ),
+                        CompiledKind::Module => "module".into(),
+                    }
+                }
+                impl LLVMCallbacks for Callbacks {
+                    fn preopt_ir(&self, kind: &CompiledKind, module: &InkwellModule) {
+                        let mut path = self.debug_dir.clone();
+                        path.push(format!("{}.preopt.ll", function_kind_to_filename(kind)));
+                        module
+                            .print_to_file(&path)
+                            .expect("Error while dumping pre optimized LLVM IR");
+                    }
+                    fn postopt_ir(&self, kind: &CompiledKind, module: &InkwellModule) {
+                        let mut path = self.debug_dir.clone();
+                        path.push(format!("{}.postopt.ll", function_kind_to_filename(kind)));
+                        module
+                            .print_to_file(&path)
+                            .expect("Error while dumping post optimized LLVM IR");
+                    }
+                    fn obj_memory_buffer(
+                        &self,
+                        kind: &CompiledKind,
+                        memory_buffer: &InkwellMemoryBuffer,
+                    ) {
+                        let mut path = self.debug_dir.clone();
+                        path.push(format!("{}.o", function_kind_to_filename(kind)));
+                        let mem_buf_slice = memory_buffer.as_slice();
+                        let mut file = File::create(path)
+                            .expect("Error while creating debug object file from LLVM IR");
+                        let mut pos = 0;
+                        while pos < mem_buf_slice.len() {
+                            pos += file.write(&mem_buf_slice[pos..]).unwrap();
+                        }
+                    }
+                }
+
+                impl fmt::Debug for Callbacks {
+                    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                        write!(f, "LLVMCallbacks")
+                    }
+                }
+
+                if let Some(ref llvm_debug_dir) = self.llvm_debug_dir {
+                    config.callbacks(Some(Arc::new(Callbacks::new(llvm_debug_dir.clone())?)));
+                }
+                if self.enable_verifier {
+                    config.enable_verifier();
+                }
+                Box::new(config)
+            }
+            #[cfg(not(all(feature = "singlepass", feature = "cranelift", feature = "llvm",)))]
+            compiler => {
+                bail!(
+                    "The `{}` compiler is not included in this binary.",
+                    compiler.to_string()
+                )
+            }
+        };
+
+        #[allow(unreachable_code)]
+        Ok((compiler_config, compiler))
+    }
+}
+
+/// The compiler used for the store
+#[derive(Debug, PartialEq, Eq)]
+pub enum CompilerType {
+    /// Singlepass compiler
+    Singlepass,
+    /// Cranelift compiler
+    Cranelift,
+    /// LLVM compiler
+    LLVM,
+    /// Headless compiler
+    Headless,
+}
+
+impl CompilerType {
+    /// Return all enabled compilers
+    pub fn enabled() -> Vec<CompilerType> {
+        vec![
+            #[cfg(feature = "singlepass")]
+            Self::Singlepass,
+            #[cfg(feature = "cranelift")]
+            Self::Cranelift,
+            #[cfg(feature = "llvm")]
+            Self::LLVM,
+        ]
+    }
+}
+
+impl ToString for CompilerType {
+    fn to_string(&self) -> String {
+        match self {
+            Self::Singlepass => "singlepass".to_string(),
+            Self::Cranelift => "cranelift".to_string(),
+            Self::LLVM => "llvm".to_string(),
+            Self::Headless => "headless".to_string(),
+        }
+    }
+}
+
+/// The engine used for the store
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum EngineType {
+    /// Universal Engine
+    Universal,
+    /// Dylib Engine
+    Dylib,
+    /// Staticlib Engine
+    Staticlib,
+}
+
+impl ToString for EngineType {
+    fn to_string(&self) -> String {
+        match self {
+            Self::Universal => "universal".to_string(),
+            Self::Dylib => "dylib".to_string(),
+            Self::Staticlib => "staticlib".to_string(),
+        }
+    }
+}
+
+#[cfg(all(feature = "compiler", feature = "engine"))]
+impl StoreOptions {
+    /// Gets the store for the host target, with the engine name and compiler name selected
+    pub fn get_store(&self) -> Result<(Store, EngineType, CompilerType)> {
+        let target = Target::default();
+        self.get_store_for_target(target)
+    }
+
+    /// Gets the store for a given target, with the engine name and compiler name selected.
+    pub fn get_store_for_target(
+        &self,
+        target: Target,
+    ) -> Result<(Store, EngineType, CompilerType)> {
+        let (compiler_config, compiler_type) = self.compiler.get_compiler_config()?;
+        let (engine, engine_type) = self.get_engine_with_compiler(target, compiler_config)?;
+        let store = Store::new(&*engine);
+        Ok((store, engine_type, compiler_type))
+    }
+
+    fn get_engine_with_compiler(
+        &self,
+        target: Target,
+        compiler_config: Box<dyn CompilerConfig>,
+    ) -> Result<(Box<dyn Engine + Send + Sync>, EngineType)> {
+        let engine_type = self.get_engine()?;
+        let engine = self
+            .compiler
+            .get_engine_by_type(target, compiler_config, engine_type)?;
+
+        Ok((engine, engine_type))
+    }
+}
+
+#[cfg(feature = "engine")]
+impl StoreOptions {
+    fn get_engine(&self) -> Result<EngineType> {
+        if self.universal || self.jit {
+            Ok(EngineType::Universal)
+        } else if self.dylib || self.native {
+            Ok(EngineType::Dylib)
+        } else if self.staticlib || self.object_file {
+            Ok(EngineType::Staticlib)
+        } else {
+            // Auto mode: choose the best engine for this platform
+            if cfg!(feature = "universal") {
+                Ok(EngineType::Universal)
+            } else if cfg!(feature = "dylib") {
+                Ok(EngineType::Dylib)
+            } else if cfg!(feature = "staticlib") {
+                Ok(EngineType::Staticlib)
+            } else {
+                bail!("There are no available engines for your architecture")
+            }
+        }
+    }
+}
+
+// If we don't have a compiler, but we have an engine
+#[cfg(all(not(feature = "compiler"), feature = "engine"))]
+impl StoreOptions {
+    fn get_engine_headless(&self) -> Result<(Arc<dyn Engine + Send + Sync>, EngineType)> {
+        let engine_type = self.get_engine()?;
+        let engine: Arc<dyn Engine + Send + Sync> = match engine_type {
+            #[cfg(feature = "universal")]
+            EngineType::Universal => {
+                Arc::new(wasmer_engine_universal::Universal::headless().engine())
+            }
+            #[cfg(feature = "dylib")]
+            EngineType::Dylib => Arc::new(wasmer_engine_dylib::Dylib::headless().engine()),
+            #[cfg(feature = "staticlib")]
+            EngineType::Staticlib => {
+                Arc::new(wasmer_engine_staticlib::Staticlib::headless().engine())
+            }
+            #[cfg(not(all(feature = "universal", feature = "dylib", feature = "staticlib")))]
+            engine => bail!(
+                "The `{}` engine is not included in this binary.",
+                engine.to_string()
+            ),
+        };
+        Ok((engine, engine_type))
+    }
+
+    /// Get the store (headless engine)
+    pub fn get_store(&self) -> Result<(Store, EngineType, CompilerType)> {
+        let (engine, engine_type) = self.get_engine_headless()?;
+        let store = Store::new(&*engine);
+        Ok((store, engine_type, CompilerType::Headless))
+    }
+
+    /// Gets the store for the provided target
+    pub fn get_store_for_target(
+        &self,
+        _target: Target,
+    ) -> Result<(Store, EngineType, CompilerType)> {
+        bail!("You need compilers to retrieve a store for a specific target");
+    }
+}
+
+// If we don't have any engine enabled
+#[cfg(not(feature = "engine"))]
+impl StoreOptions {
+    /// Get the store (headless engine)
+    pub fn get_store(&self) -> Result<(Store, EngineType, CompilerType)> {
+        bail!("No engines are enabled");
+    }
+
+    /// Gets the store for the host target
+    pub fn get_store_for_target(
+        &self,
+        _target: Target,
+    ) -> Result<(Store, EngineType, CompilerType)> {
+        bail!("No engines are enabled");
+    }
+}
diff --git a/lib/cli/src/suggestions.rs b/lib/cli/src/suggestions.rs
new file mode 100644
index 0000000000..4509e21d93
--- /dev/null
+++ b/lib/cli/src/suggestions.rs
@@ -0,0 +1,18 @@
+//! This file provides suggestions for the user, to help them with the
+//! usage of WebAssembly
+use distance::damerau_levenshtein;
+use wasmer::Module;
+
+/// Suggest function exports for the module
+pub fn suggest_function_exports(module: &Module, query: &str) -> Vec<String> {
+    let mut function_names = module
+        .exports()
+        .functions()
+        .map(|extern_fn| {
+            let name = extern_fn.name();
+            name.to_string()
+        })
+        .collect::<Vec<String>>();
+    function_names.sort_by_key(|name| damerau_levenshtein(name, query));
+    function_names
+}
diff --git a/lib/cli/src/utils.rs b/lib/cli/src/utils.rs
new file mode 100644
index 0000000000..a032f26f40
--- /dev/null
+++ b/lib/cli/src/utils.rs
@@ -0,0 +1,92 @@
+//! Utility functions for the WebAssembly module
+use anyhow::{bail, Result};
+use std::env;
+use std::path::PathBuf;
+
+/// Whether or not Wasmer should print with color
+pub fn wasmer_should_print_color() -> bool {
+    env::var("WASMER_COLOR")
+        .ok()
+        .and_then(|inner| inner.parse::<bool>().ok())
+        .unwrap_or_else(|| atty::is(atty::Stream::Stdout))
+}
+
+fn retrieve_alias_pathbuf(alias: &str, real_dir: &str) -> Result<(String, PathBuf)> {
+    let pb = PathBuf::from(&real_dir);
+    if let Ok(pb_metadata) = pb.metadata() {
+        if !pb_metadata.is_dir() {
+            bail!("\"{}\" exists, but it is not a directory", &real_dir);
+        }
+    } else {
+        bail!("Directory \"{}\" does not exist", &real_dir);
+    }
+    Ok((alias.to_string(), pb))
+}
+
+/// Parses a mapdir from a string
+pub fn parse_mapdir(entry: &str) -> Result<(String, PathBuf)> {
+    // We try first splitting by `::`
+    if let [alias, real_dir] = entry.split("::").collect::<Vec<&str>>()[..] {
+        retrieve_alias_pathbuf(alias, real_dir)
+    }
+    // And then we try splitting by `:` (for compatibility with the previous API)
+    else if let [alias, real_dir] = entry.split(':').collect::<Vec<&str>>()[..] {
+        retrieve_alias_pathbuf(alias, real_dir)
+    } else {
+        bail!(
+            "Directory mappings must consist of two paths separated by a `::` or `:`. Found {}",
+            &entry
+        )
+    }
+}
+
+/// Parses an environment variable.
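+///
+/// Only the first `=` splits the entry: `FOO=bar` parses to `("FOO", "bar")`,
+/// while `A=B=C` parses to `("A", "B=C")`, as exercised by the tests below.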
+pub fn parse_envvar(entry: &str) -> Result<(String, String)> {
+    let entry = entry.trim();
+
+    match entry.find('=') {
+        None => bail!(
+            "Environment variable must be of the form `<name>=<value>`; found `{}`",
+            &entry
+        ),
+
+        Some(0) => bail!(
+            "Environment variable is not well formed, the `name` is missing in `<name>=<value>`; got `{}`",
+            &entry
+        ),
+
+        Some(position) if position == entry.len() - 1 => bail!(
+            "Environment variable is not well formed, the `value` is missing in `<name>=<value>`; got `{}`",
+            &entry
+        ),
+
+        Some(position) => Ok((entry[..position].into(), entry[position + 1..].into())),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::parse_envvar;
+
+    #[test]
+    fn test_parse_envvar() {
+        assert_eq!(
+            parse_envvar("A").unwrap_err().to_string(),
+            "Environment variable must be of the form `<name>=<value>`; found `A`"
+        );
+        assert_eq!(
+            parse_envvar("=A").unwrap_err().to_string(),
+            "Environment variable is not well formed, the `name` is missing in `<name>=<value>`; got `=A`"
+        );
+        assert_eq!(
+            parse_envvar("A=").unwrap_err().to_string(),
+            "Environment variable is not well formed, the `value` is missing in `<name>=<value>`; got `A=`"
+        );
+        assert_eq!(parse_envvar("A=B").unwrap(), ("A".into(), "B".into()));
+        assert_eq!(parse_envvar(" A=B\t").unwrap(), ("A".into(), "B".into()));
+        assert_eq!(
+            parse_envvar("A=B=C=D").unwrap(),
+            ("A".into(), "B=C=D".into())
+        );
+    }
+}
diff --git a/lib/compiler-cranelift/.gitignore b/lib/compiler-cranelift/.gitignore
new file mode 100644
index 0000000000..4308d82204
--- /dev/null
+++ b/lib/compiler-cranelift/.gitignore
@@ -0,0 +1,3 @@
+target/
+**/*.rs.bk
+Cargo.lock
diff --git a/lib/compiler-cranelift/Cargo.toml b/lib/compiler-cranelift/Cargo.toml
new file mode 100644
index 0000000000..2786a119dc
--- /dev/null
+++ b/lib/compiler-cranelift/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "wasmer-compiler-cranelift"
+version = "2.1.0"
+description = "Cranelift compiler for Wasmer WebAssembly runtime"
+categories = ["wasm"]
+keywords = ["wasm", "webassembly", "compiler", "cranelift"]
+authors = ["Wasmer Engineering Team <engineering@wasmer.io>"]
+repository = "https://github.com/wasmerio/wasmer"
+documentation = "https://docs.rs/wasmer-compiler-cranelift/"
+license = "MIT OR Apache-2.0 WITH LLVM-exception"
+readme = "README.md"
+edition = "2018"
+
+[dependencies]
+wasmer-compiler = { path = "../compiler", version = "=2.4.0", package = "wasmer-compiler-near", features = ["translator"], default-features = false }
+wasmer-vm = { path = "../vm", version = "=2.4.0", package = "wasmer-vm-near" }
+wasmer-types = { path = "../types", version = "=2.4.0", package = "wasmer-types-near", default-features = false, features = ["std"] }
+cranelift-entity = { version = "0.76", default-features = false }
+cranelift-codegen = { version = "0.76", default-features = false, features = ["x86", "arm64"] }
+cranelift-frontend = { version = "0.76", default-features = false }
+tracing = "0.1"
+hashbrown = { version = "0.11", optional = true }
+rayon = "1.5"
+more-asserts = "0.2"
+gimli = { version = "0.25", optional = true }
+smallvec = "1.6"
+target-lexicon = { version = "0.12.2", default-features = false }
+
+[dev-dependencies]
+cranelift-codegen = { version = "0.76", features = ["all-arch"] }
+lazy_static = "1.4"
+
+[badges]
+maintenance = { status = "actively-developed" }
+
+[features]
+default = ["std", "unwind"]
+unwind = ["cranelift-codegen/unwind", "gimli"]
+std = ["cranelift-codegen/std", "cranelift-frontend/std", "wasmer-compiler/std", "wasmer-types/std"]
+core = ["hashbrown", "cranelift-codegen/core", "cranelift-frontend/core"]
diff --git a/lib/compiler-cranelift/README.md b/lib/compiler-cranelift/README.md
new file mode 100644
index 0000000000..f10abbe30b
--- /dev/null
+++ b/lib/compiler-cranelift/README.md
@@ -0,0 +1,37 @@
+# `wasmer-compiler-cranelift` [![Build Status](https://github.com/wasmerio/wasmer/workflows/build/badge.svg?style=flat-square)](https://github.com/wasmerio/wasmer/actions?query=workflow%3Abuild) [![Join Wasmer Slack](https://img.shields.io/static/v1?label=Slack&message=join%20chat&color=brighgreen&style=flat-square)](https://slack.wasmer.io) [![MIT License](https://img.shields.io/github/license/wasmerio/wasmer.svg?style=flat-square)](https://github.com/wasmerio/wasmer/blob/master/LICENSE) [![crates.io](https://img.shields.io/crates/v/wasmer-compiler-cranelift.svg)](https://crates.io/crates/wasmer-compiler-cranelift)
+
+This crate contains a compiler implementation based on Cranelift.
+
+## Usage
+
+```rust
+use wasmer::{Store, Universal};
+use wasmer_compiler_cranelift::Cranelift;
+
+let compiler = Cranelift::new();
+// Put it into an engine and add it to the store
+let store = Store::new(&Universal::new(compiler).engine());
+```
+
+*Note: you can find a [full working example using the Cranelift compiler
+here][example].*
+
+## When to use Cranelift
+
+We recommend using this compiler crate **only for development
+purposes**. For production we recommend using [`wasmer-compiler-llvm`]
+as it offers a much better runtime speed (50% faster on average).
+
+### Acknowledgments
+
+This project borrowed some of the function lowering from
+[`cranelift-wasm`].
+
+Please check [Wasmer `ATTRIBUTIONS`] to further see licenses and other
+attributions of the project.
+
+
+[example]: https://github.com/wasmerio/wasmer/blob/master/examples/compiler_cranelift.rs
+[`wasmer-compiler-llvm`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-llvm
+[`cranelift-wasm`]: https://crates.io/crates/cranelift-wasm
+[Wasmer `ATTRIBUTIONS`]: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md
diff --git a/lib/compiler-cranelift/build.rs b/lib/compiler-cranelift/build.rs
new file mode 100644
index 0000000000..5e03bc2e02
--- /dev/null
+++ b/lib/compiler-cranelift/build.rs
@@ -0,0 +1,15 @@
+//! Wasmer Cranelift compiler build script.
+//!
+//! Exports the current git revision as the `GIT_REV` environment variable
+//! (falling back to the crate version when git is unavailable) so it can be
+//! embedded in the build.
+
+use std::process::Command;
+use std::str;
+
+fn main() {
+    let git_rev = match Command::new("git").args(&["rev-parse", "HEAD"]).output() {
+        Ok(output) => str::from_utf8(&output.stdout).unwrap().trim().to_string(),
+        Err(_) => env!("CARGO_PKG_VERSION").to_string(),
+    };
+    println!("cargo:rustc-env=GIT_REV={}", git_rev);
+}
diff --git a/lib/compiler-cranelift/src/address_map.rs b/lib/compiler-cranelift/src/address_map.rs
new file mode 100644
index 0000000000..4d5c2258cb
--- /dev/null
+++ b/lib/compiler-cranelift/src/address_map.rs
@@ -0,0 +1,57 @@
+// This file contains code from external sources.
+// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md
+
+use cranelift_codegen::MachSrcLoc;
+use cranelift_codegen::{isa, Context};
+use wasmer_compiler::{wasmparser::Range, FunctionAddressMap, InstructionAddressMap, SourceLoc};
+
+pub fn get_function_address_map<'data>(
+    context: &Context,
+    range: Range,
+    body_len: usize,
+    isa: &dyn isa::TargetIsa,
+) -> FunctionAddressMap {
+    let mut instructions = Vec::new();
+
+    if let Some(ref mcr) = &context.mach_compile_result {
+        // New-style backend: we have a `MachCompileResult` that will give us `MachSrcLoc` mapping
+        // tuples.
+        for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
+            instructions.push(InstructionAddressMap {
+                srcloc: SourceLoc::new(loc.bits()),
+                code_offset: start as usize,
+                code_len: (end - start) as usize,
+            });
+        }
+    } else {
+        let func = &context.func;
+        let mut blocks = func.layout.blocks().collect::<Vec<_>>();
+        blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
+
+        let encinfo = isa.encoding_info();
+        for block in blocks {
+            for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
+                let srcloc = func.srclocs[inst];
+                instructions.push(InstructionAddressMap {
+                    srcloc: SourceLoc::new(srcloc.bits()),
+                    code_offset: offset as usize,
+                    code_len: size as usize,
+                });
+            }
+        }
+    }
+
+    // Generate artificial srcloc for function start/end to identify boundary
+    // within module. Similar to FuncTranslator::cur_srcloc(): it will wrap around
+    // if byte code is larger than 4 GB.
+    let start_srcloc = SourceLoc::new(range.start as u32);
+    let end_srcloc = SourceLoc::new(range.end as u32);
+
+    FunctionAddressMap {
+        instructions,
+        start_srcloc,
+        end_srcloc,
+        body_offset: 0,
+        body_len,
+    }
+}
diff --git a/lib/compiler-cranelift/src/compiler.rs b/lib/compiler-cranelift/src/compiler.rs
new file mode 100644
index 0000000000..828bd83c90
--- /dev/null
+++ b/lib/compiler-cranelift/src/compiler.rs
@@ -0,0 +1,285 @@
+//! Support for compiling with Cranelift.
+
+use crate::address_map::get_function_address_map;
+use crate::config::Cranelift;
+#[cfg(feature = "unwind")]
+use crate::dwarf::WriterRelocate;
+use crate::func_environ::{get_function_name, FuncEnvironment};
+use crate::sink::{RelocSink, TrapSink};
+use crate::trampoline::{
+    make_trampoline_dynamic_function, make_trampoline_function_call, FunctionBuilderContext,
+};
+use crate::translator::{
+    compiled_function_unwind_info, signature_to_cranelift_ir, transform_jump_table,
+    CraneliftUnwindInfo, FuncTranslator,
+};
+use cranelift_codegen::ir;
+use cranelift_codegen::print_errors::pretty_error;
+use cranelift_codegen::{binemit, Context};
+#[cfg(feature = "unwind")]
+use gimli::write::{Address, EhFrame, FrameTable};
+use rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
+use target_lexicon::{Architecture, OperatingSystem};
+use wasmer_compiler::CompileError;
+use wasmer_compiler::{CallingConvention, ModuleTranslationState, Target};
+use wasmer_compiler::{
+    Compilation, CompileModuleInfo, CompiledFunction, CompiledFunctionFrameInfo,
+    CompiledFunctionUnwindInfo, Compiler, Dwarf, FunctionBody, FunctionBodyData, SectionIndex,
+};
+use wasmer_compiler::{
+    CustomSection, CustomSectionProtection, Relocation, RelocationKind, RelocationTarget,
+    SectionBody,
+};
+use wasmer_types::entity::{EntityRef, PrimaryMap};
+use wasmer_types::{FunctionIndex, LocalFunctionIndex, SignatureIndex};
+use wasmer_vm::libcalls::LibCall;
+
+/// A compiler that compiles a WebAssembly module with Cranelift, translating the Wasm to Cranelift IR,
+/// optimizing it and then translating to assembly.
+pub struct CraneliftCompiler {
+    config: Cranelift,
+}
+
+impl CraneliftCompiler {
+    /// Creates a new Cranelift compiler
+    pub fn new(config: Cranelift) -> Self {
+        Self { config }
+    }
+
+    /// Gets the configuration for this Compiler
+    pub fn config(&self) -> &Cranelift {
+        &self.config
+    }
+}
+
+impl Compiler for CraneliftCompiler {
+    /// Compile the module using Cranelift, producing a compilation result with
+    /// associated relocations.
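+    /// Function bodies are translated and compiled in parallel (via rayon)
+    /// and then combined with the generated trampolines, custom sections and
+    /// optional DWARF unwind information.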
+    fn compile_module(
+        &self,
+        target: &Target,
+        compile_info: &CompileModuleInfo,
+        module_translation_state: &ModuleTranslationState,
+        function_body_inputs: PrimaryMap<LocalFunctionIndex, FunctionBodyData<'_>>,
+    ) -> Result<Compilation, CompileError> {
+        let isa = self.config().isa(target);
+        let frontend_config = isa.frontend_config();
+        let memory_styles = &compile_info.memory_styles;
+        let table_styles = &compile_info.table_styles;
+        let module = &compile_info.module;
+        let signatures = module
+            .signatures
+            .iter()
+            .map(|(_sig_index, func_type)| signature_to_cranelift_ir(func_type, frontend_config))
+            .collect::<PrimaryMap<SignatureIndex, ir::Signature>>();
+
+        // Generate the frametable
+        #[cfg(feature = "unwind")]
+        let dwarf_frametable = if function_body_inputs.is_empty() {
+            // If we have no function body inputs, we don't need to
+            // construct the `FrameTable`. Constructing it, with empty
+            // FDEs will cause some issues in Linux.
+            None
+        } else {
+            match target.triple().default_calling_convention() {
+                Ok(CallingConvention::SystemV) => {
+                    match isa.create_systemv_cie() {
+                        Some(cie) => {
+                            let mut dwarf_frametable = FrameTable::default();
+                            let cie_id = dwarf_frametable.add_cie(cie);
+                            Some((dwarf_frametable, cie_id))
+                        }
+                        // Even though we are on a SystemV system, Cranelift may not support it
+                        None => None,
+                    }
+                }
+                _ => None,
+            }
+        };
+
+        let mut custom_sections = PrimaryMap::new();
+
+        let probestack_trampoline_relocation_target = if target.triple().operating_system
+            == OperatingSystem::Linux
+            && target.triple().architecture == Architecture::X86_64
+        {
+            let probestack_trampoline = CustomSection {
+                protection: CustomSectionProtection::ReadExecute,
+                // We create a jump to an absolute 64-bit address:
+                // an indirect jump immediately followed by the absolute address.
+                // JMP [IP+0]     FF 25 00 00 00 00
+                // 64-bit ADDR    00 00 00 00 00 00 00 00, preset to 0 until the relocation takes place
+                bytes: SectionBody::new_with_vec(vec![
+                    0xff, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                    0x00,
+                ]),
+                relocations: vec![Relocation {
+                    kind: RelocationKind::Abs8,
+                    reloc_target: RelocationTarget::LibCall(LibCall::Probestack),
+                    // 6 is the size of the jmp instruction; the relocated address must follow it.
+                    offset: 6,
+                    addend: 0,
+                }],
+            };
+            custom_sections.push(probestack_trampoline);
+
+            Some(SectionIndex::new(custom_sections.len() - 1))
+        } else {
+            None
+        };
+
+        let (functions, fdes): (Vec<_>, Vec<_>) = function_body_inputs
+            .iter()
+            .collect::<Vec<(LocalFunctionIndex, &FunctionBodyData<'_>)>>()
+            .par_iter()
+            .map_init(FuncTranslator::new, |func_translator, (i, input)| {
+                let func_index = module.func_index(*i);
+                let mut context = Context::new();
+                let mut func_env = FuncEnvironment::new(
+                    isa.frontend_config(),
+                    module,
+                    &signatures,
+                    &memory_styles,
+                    &table_styles,
+                );
+                context.func.name = get_function_name(func_index);
+                context.func.signature = signatures[module.functions[func_index]].clone();
+                // if generate_debug_info {
+                //     context.func.collect_debug_info();
+                // }
+                let mut reader =
+                    wasmer_compiler::FunctionReader::new(input.module_offset, input.data);
+                func_translator.translate(
+                    module_translation_state,
+                    &mut reader,
+                    &mut context.func,
+                    &mut func_env,
+                    *i,
+                )?;
+
+                let mut code_buf: Vec<u8> = Vec::new();
+                let mut reloc_sink =
+                    RelocSink::new(&module, func_index, probestack_trampoline_relocation_target);
+                let mut trap_sink = TrapSink::new();
+                let mut stackmap_sink = binemit::NullStackMapSink {};
+                context
+                    .compile_and_emit(
+                        &*isa,
+                        &mut code_buf,
+                        &mut reloc_sink,
+                        &mut trap_sink,
+                        &mut stackmap_sink,
+                    )
+                    .map_err(|error| {
+                        CompileError::Codegen(pretty_error(&context.func, Some(&*isa), error))
+                    })?;
+
+                let (unwind_info, fde) = match compiled_function_unwind_info(&*isa, &context)? {
+                    #[cfg(feature = "unwind")]
+                    CraneliftUnwindInfo::FDE(fde) => {
+                        if dwarf_frametable.is_some() {
+                            let fde = fde.to_fde(Address::Symbol {
+                                // The symbol is the kind of relocation;
+                                // "0" is used for functions.
+                                symbol: WriterRelocate::FUNCTION_SYMBOL,
+                                // We use the addend as a way to specify the
+                                // function index
+                                addend: i.index() as _,
+                            });
+                            // The unwind information is inserted into the dwarf section
+                            (Some(CompiledFunctionUnwindInfo::Dwarf), Some(fde))
+                        } else {
+                            (None, None)
+                        }
+                    }
+                    #[cfg(feature = "unwind")]
+                    other => (other.maybe_into_to_windows_unwind(), None),
+
+                    // This is a bit hacky, but necessary since gimli is not
+                    // available when the "unwind" feature is disabled.
+                    #[cfg(not(feature = "unwind"))]
+                    other => (other.maybe_into_to_windows_unwind(), None::<()>),
+                };
+
+                let range = reader.range();
+                let address_map = get_function_address_map(&context, range, code_buf.len(), &*isa);
+
+                // We transform the Cranelift JumpTables into compiler JumpTables
+                let func_jt_offsets = transform_jump_table(context.func.jt_offsets);
+
+                Ok((
+                    CompiledFunction {
+                        body: FunctionBody {
+                            body: code_buf,
+                            unwind_info,
+                        },
+                        jt_offsets: func_jt_offsets,
+                        relocations: reloc_sink.func_relocs,
+                        frame_info: CompiledFunctionFrameInfo {
+                            address_map,
+                            traps: trap_sink.traps,
+                        },
+                    },
+                    fde,
+                ))
+            })
+            .collect::<Result<Vec<_>, CompileError>>()?
+            .into_iter()
+            .unzip();
+
+        #[cfg(feature = "unwind")]
+        let dwarf = if let Some((mut dwarf_frametable, cie_id)) = dwarf_frametable {
+            for fde in fdes {
+                if let Some(fde) = fde {
+                    dwarf_frametable.add_fde(cie_id, fde);
+                }
+            }
+            let mut eh_frame = EhFrame(WriterRelocate::new(target.triple().endianness().ok()));
+            dwarf_frametable.write_eh_frame(&mut eh_frame).unwrap();
+
+            let eh_frame_section = eh_frame.0.into_section();
+            custom_sections.push(eh_frame_section);
+            Some(Dwarf::new(SectionIndex::new(custom_sections.len() - 1)))
+        } else {
+            None
+        };
+        #[cfg(not(feature = "unwind"))]
+        let dwarf = None;
+
+        // function call trampolines (only for local functions, by signature)
+        let function_call_trampolines = module
+            .signatures
+            .values()
+            .collect::<Vec<_>>()
+            .par_iter()
+            .map_init(FunctionBuilderContext::new, |mut cx, sig| {
+                make_trampoline_function_call(&*isa, &mut cx, sig)
+            })
+            .collect::<Result<Vec<_>, CompileError>>()?
+            .into_iter()
+            .collect::<PrimaryMap<SignatureIndex, FunctionBody>>();
+
+        use wasmer_vm::VMOffsets;
+        let offsets = VMOffsets::new(frontend_config.pointer_bytes());
+        // dynamic function trampolines (only for imported functions)
+        let dynamic_function_trampolines = module
+            .imported_function_types()
+            .collect::<Vec<_>>()
+            .par_iter()
+            .map_init(FunctionBuilderContext::new, |mut cx, func_type| {
+                make_trampoline_dynamic_function(&*isa, &offsets, &mut cx, &func_type)
+            })
+            .collect::<Result<Vec<_>, CompileError>>()?
+            .into_iter()
+            .collect::<PrimaryMap<FunctionIndex, FunctionBody>>();
+
+        Ok(Compilation::new(
+            functions.into_iter().collect(),
+            custom_sections,
+            function_call_trampolines,
+            dynamic_function_trampolines,
+            dwarf,
+            None,
+        ))
+    }
+}
diff --git a/lib/compiler-cranelift/src/config.rs b/lib/compiler-cranelift/src/config.rs
new file mode 100644
index 0000000000..84f58b1ea9
--- /dev/null
+++ b/lib/compiler-cranelift/src/config.rs
@@ -0,0 +1,199 @@
+use crate::compiler::CraneliftCompiler;
+use cranelift_codegen::isa::{lookup, TargetIsa};
+use cranelift_codegen::settings::{self, Configurable};
+use wasmer_compiler::{Architecture, Compiler, CompilerConfig, CpuFeature, Target};
+
+// Runtime Environment
+
+/// Possible optimization levels for the Cranelift codegen backend.
+#[non_exhaustive]
+#[derive(Clone, Debug)]
+pub enum CraneliftOptLevel {
+    /// No optimizations performed; minimizes compilation time by disabling most
+    /// optimizations.
+    None,
+    /// Generates the fastest possible code, but may take longer.
+    Speed,
+    /// Similar to `speed`, but also performs transformations aimed at reducing
+    /// code size.
+    SpeedAndSize,
+}
+
+/// Global configuration options used to create a
+/// `wasmer_engine::Engine` and customize its behavior.
+///
+/// This structure exposes a builder-like interface and is primarily
+/// consumed by `wasmer_engine::Engine::new`.
+#[derive(Debug, Clone)]
+pub struct Cranelift {
+    enable_nan_canonicalization: bool,
+    enable_verifier: bool,
+    enable_pic: bool,
+    opt_level: CraneliftOptLevel,
+}
+
+impl Cranelift {
+    /// Creates a new configuration object with the default configuration
+    /// specified.
+    pub fn new() -> Self {
+        Self {
+            enable_nan_canonicalization: false,
+            enable_verifier: false,
+            opt_level: CraneliftOptLevel::Speed,
+            enable_pic: false,
+        }
+    }
+
+    /// Enable NaN canonicalization.
+    ///
+    /// NaN canonicalization is useful when trying to run WebAssembly
+    /// deterministically across different architectures.
+    pub fn canonicalize_nans(&mut self, enable: bool) -> &mut Self {
+        self.enable_nan_canonicalization = enable;
+        self
+    }
+
+    /// The optimization level used when optimizing the IR.
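+    /// This maps directly onto Cranelift's `opt_level` setting in `flags()`
+    /// below: `None` becomes "none", `Speed` becomes "speed", and
+    /// `SpeedAndSize` becomes "speed_and_size".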
+    pub fn opt_level(&mut self, opt_level: CraneliftOptLevel) -> &mut Self {
+        self.opt_level = opt_level;
+        self
+    }
+
+    /// Generates the ISA for the provided target
+    pub fn isa(&self, target: &Target) -> Box<dyn TargetIsa> {
+        let mut builder =
+            lookup(target.triple().clone()).expect("construct Cranelift ISA for triple");
+        // CPU features
+        let cpu_features = target.cpu_features();
+        if target.triple().architecture == Architecture::X86_64
+            && !cpu_features.contains(CpuFeature::SSE2)
+        {
+            panic!("x86 support requires SSE2");
+        }
+        if cpu_features.contains(CpuFeature::SSE3) {
+            builder.enable("has_sse3").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::SSSE3) {
+            builder.enable("has_ssse3").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::SSE41) {
+            builder.enable("has_sse41").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::SSE42) {
+            builder.enable("has_sse42").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::POPCNT) {
+            builder.enable("has_popcnt").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::AVX) {
+            builder.enable("has_avx").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::BMI1) {
+            builder.enable("has_bmi1").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::BMI2) {
+            builder.enable("has_bmi2").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::AVX2) {
+            builder.enable("has_avx2").expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::AVX512DQ) {
+            builder
+                .enable("has_avx512dq")
+                .expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::AVX512VL) {
+            builder
+                .enable("has_avx512vl")
+                .expect("should be valid flag");
+        }
+        if cpu_features.contains(CpuFeature::LZCNT) {
+            builder.enable("has_lzcnt").expect("should be valid flag");
+        }
+
+        builder.finish(self.flags())
+    }
+
+    /// Generates the flags for the compiler
+    pub fn flags(&self) -> settings::Flags {
+        let mut flags = settings::builder();
+
+        // There are two possible traps for division, and this way
+        // we get the proper one if code traps.
+        flags
+            .enable("avoid_div_traps")
+            .expect("should be valid flag");
+
+        if self.enable_pic {
+            flags.enable("is_pic").expect("should be a valid flag");
+        }
+
+        // Cranelift's verifier is on by default; invert that so it defaults to off here.
+        let enable_verifier = if self.enable_verifier {
+            "true"
+        } else {
+            "false"
+        };
+        flags
+            .set("enable_verifier", enable_verifier)
+            .expect("should be valid flag");
+        flags
+            .set("enable_safepoints", "true")
+            .expect("should be valid flag");
+
+        flags
+            .set(
+                "opt_level",
+                match self.opt_level {
+                    CraneliftOptLevel::None => "none",
+                    CraneliftOptLevel::Speed => "speed",
+                    CraneliftOptLevel::SpeedAndSize => "speed_and_size",
+                },
+            )
+            .expect("should be valid flag");
+
+        flags
+            .set("enable_simd", "true")
+            .expect("should be valid flag");
+
+        let enable_nan_canonicalization = if self.enable_nan_canonicalization {
+            "true"
+        } else {
+            "false"
+        };
+        flags
+            .set("enable_nan_canonicalization", enable_nan_canonicalization)
+            .expect("should be valid flag");
+
+        settings::Flags::new(flags)
+    }
+}
+
+impl CompilerConfig for Cranelift {
+    fn enable_pic(&mut self) {
+        self.enable_pic = true;
+    }
+
+    fn enable_verifier(&mut self) {
+        self.enable_verifier = true;
+    }
+
+    fn enable_nan_canonicalization(&mut self) {
+        self.enable_nan_canonicalization = true;
+    }
+
+    fn canonicalize_nans(&mut self, enable: bool) {
+        self.enable_nan_canonicalization = enable;
+    }
+
+    /// Transform it into the compiler
+    fn compiler(self: Box<Self>) -> Box<dyn Compiler> {
+        Box::new(CraneliftCompiler::new(*self))
+    }
+}
+
+impl Default for Cranelift {
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/lib/compiler-cranelift/src/debug/address_map.rs b/lib/compiler-cranelift/src/debug/address_map.rs
new file mode 100644
index 0000000000..c8900a0bc4
--- /dev/null
+++ b/lib/compiler-cranelift/src/debug/address_map.rs
@@ -0,0 +1,33 @@
+//! Data structures for mapping the source addresses of a WebAssembly
+//! module into native code.
+
+use cranelift_codegen::ir;
+use wasmer_types::entity::PrimaryMap;
+use wasmer_types::LocalFunctionIndex;
+
+/// Value ranges for functions.
+pub type ValueLabelsRanges = PrimaryMap<LocalFunctionIndex, cranelift_codegen::ValueLabelsRanges>;
+
+/// Stack slots for functions.
+pub type StackSlots = PrimaryMap<LocalFunctionIndex, ir::StackSlots>;
+
+/// Memory definition offset in the VMContext structure.
+#[derive(Debug, Clone)]
+pub enum ModuleInfoMemoryOffset {
+    /// Not available.
+    None,
+    /// Offset to the defined memory.
+    Defined(u32),
+    /// Offset to the imported memory.
+    Imported(u32),
+}
+
+/// ModuleInfo `vmctx` related info.
+#[derive(Debug, Clone)]
+pub struct ModuleInfoVmctxInfo {
+    /// The memory definition offset in the VMContext structure.
+    pub memory_offset: ModuleInfoMemoryOffset,
+
+    /// The functions' stack slots.
+    pub stack_slots: StackSlots,
+}
diff --git a/lib/compiler-cranelift/src/debug/mod.rs b/lib/compiler-cranelift/src/debug/mod.rs
new file mode 100644
index 0000000000..f369fb79ac
--- /dev/null
+++ b/lib/compiler-cranelift/src/debug/mod.rs
@@ -0,0 +1,3 @@
+mod address_map;
+
+pub use self::address_map::{ModuleInfoMemoryOffset, ModuleInfoVmctxInfo, ValueLabelsRanges};
diff --git a/lib/compiler-cranelift/src/dwarf.rs b/lib/compiler-cranelift/src/dwarf.rs
new file mode 100644
index 0000000000..07e67bbdb0
--- /dev/null
+++ b/lib/compiler-cranelift/src/dwarf.rs
@@ -0,0 +1,102 @@
+use gimli::write::{Address, EndianVec, Result, Writer};
+use gimli::{RunTimeEndian, SectionId};
+use wasmer_compiler::{CustomSection, CustomSectionProtection, SectionBody};
+use wasmer_compiler::{Endianness, Relocation, RelocationKind, RelocationTarget};
+use wasmer_types::entity::EntityRef;
+use wasmer_types::LocalFunctionIndex;
+
+#[derive(Clone, Debug)]
+pub struct WriterRelocate {
+    pub relocs: Vec<Relocation>,
+    writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+    pub const FUNCTION_SYMBOL: usize = 0;
+    pub fn new(endianness: Option<Endianness>) -> Self {
+        let endianness = match endianness {
+            Some(Endianness::Little) => RunTimeEndian::Little,
+            Some(Endianness::Big) => RunTimeEndian::Big,
+            // We autodetect it, based on the host
+            None => RunTimeEndian::default(),
+        };
+        WriterRelocate {
+            relocs: Vec::new(),
+            writer: EndianVec::new(endianness),
+        }
+    }
+
+    pub fn into_section(mut self) -> CustomSection {
+        // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+        self.writer.write_u32(0).unwrap();
+        let data = self.writer.into_vec();
+        CustomSection {
+            protection: CustomSectionProtection::Read,
+            bytes: SectionBody::new_with_vec(data),
+            relocations: self.relocs,
+        }
+    }
+}
+
+impl Writer for WriterRelocate {
+    type Endian = RunTimeEndian;
+
+    fn endian(&self) -> Self::Endian {
+        self.writer.endian()
+    }
+
+    fn len(&self) -> usize {
+        self.writer.len()
+    }
+
+    fn write(&mut self, bytes: &[u8]) -> Result<()> {
+        self.writer.write(bytes)
+    }
+
+    fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+        self.writer.write_at(offset, bytes)
+    }
+
+    fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+        match address {
+            Address::Constant(val) => self.write_udata(val, size),
+            Address::Symbol { symbol, addend } => {
+                // Is a function relocation
+                if symbol == Self::FUNCTION_SYMBOL {
+                    // We use the addend to detect the function index
+                    let function_index = LocalFunctionIndex::new(addend as _);
+                    let reloc_target = RelocationTarget::LocalFunc(function_index);
+                    let offset = self.len() as u32;
+                    let kind = match size {
+                        8 => RelocationKind::Abs8,
+                        _ => unimplemented!("dwarf relocation size not yet supported: {}", size),
+                    };
+                    let addend = 0;
+                    self.relocs.push(Relocation {
+                        kind,
+                        reloc_target,
+                        offset,
+                        addend,
+                    });
+                    self.write_udata(addend as u64, size)
+                } else {
+                    unreachable!("Symbol {} in DWARF not recognized", symbol);
+                }
+            }
+        }
+    }
+
+    fn write_offset(&mut self, _val: usize, _section: SectionId, _size: u8) -> Result<()> {
+        unimplemented!("write_offset not yet implemented");
+    }
+
+    fn write_offset_at(
+        &mut self,
+        _offset: usize,
+        _val: usize,
+        _section: SectionId,
+        _size: u8,
+    ) -> Result<()> {
+        unimplemented!("write_offset_at not yet implemented");
+    }
+}
diff --git a/lib/compiler-cranelift/src/func_environ.rs b/lib/compiler-cranelift/src/func_environ.rs
new file mode 100644
index 0000000000..7399553343
--- /dev/null
+++
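A small illustration of the contract `WriterRelocate` implements above: writing a `Symbol` address emits a zero placeholder and records an `Abs8` relocation against the local function encoded in the addend. This is a sketch only, assuming `WriterRelocate` is in scope:

```rust
use gimli::write::{Address, Writer};

// Emitting the address of local function 3: the bytes written are a
// placeholder, and `w.relocs` gains an `Abs8` entry pointing at it.
fn emit_function_address(w: &mut WriterRelocate) -> gimli::write::Result<()> {
    let addr = Address::Symbol {
        symbol: WriterRelocate::FUNCTION_SYMBOL,
        addend: 3, // local function index, carried in the addend
    };
    w.write_address(addr, 8)
}
```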
b/lib/compiler-cranelift/src/func_environ.rs @@ -0,0 +1,1547 @@ +// This file contains code from external sources. +// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md + +use crate::translator::{ + type_to_irtype, FuncEnvironment as BaseFuncEnvironment, GlobalVariable, TargetEnvironment, +}; +use cranelift_codegen::cursor::FuncCursor; +use cranelift_codegen::ir; +use cranelift_codegen::ir::condcodes::*; +use cranelift_codegen::ir::immediates::{Offset32, Uimm64}; +use cranelift_codegen::ir::types::*; +use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature}; +use cranelift_codegen::isa::TargetFrontendConfig; +use cranelift_frontend::{FunctionBuilder, Variable}; +use std::convert::TryFrom; +use wasmer_compiler::wasmparser::Type; +use wasmer_compiler::{WasmError, WasmResult}; +use wasmer_types::entity::EntityRef; +use wasmer_types::entity::PrimaryMap; +use wasmer_types::{ + FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, ModuleInfo, + SignatureIndex, TableIndex, Type as WasmerType, +}; +use wasmer_vm::VMBuiltinFunctionIndex; +use wasmer_vm::VMOffsets; +use wasmer_vm::{MemoryStyle, TableStyle}; + +/// Compute an `ir::ExternalName` for a given wasm function index. +pub fn get_function_name(func_index: FunctionIndex) -> ir::ExternalName { + ir::ExternalName::user(0, func_index.as_u32()) +} + +/// The type of the `current_elements` field. +pub fn type_of_vmtable_definition_current_elements(vmoffsets: &VMOffsets) -> ir::Type { + ir::Type::int(u16::from(vmoffsets.size_of_vmtable_definition_current_elements()) * 8).unwrap() +} + +/// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`. +pub struct FuncEnvironment<'module_environment> { + /// Target-specified configuration. + target_config: TargetFrontendConfig, + + /// The module-level environment which this function-level environment belongs to. + module: &'module_environment ModuleInfo, + + /// A stack tracking the type of local variables. + type_stack: Vec, + + /// The module function signatures + signatures: &'module_environment PrimaryMap, + + /// The Cranelift global holding the vmctx address. + vmctx: Option, + + /// The external function signature for implementing wasm's `memory.size` + /// for locally-defined 32-bit memories. + memory32_size_sig: Option, + + /// The external function signature for implementing wasm's `table.size` + /// for locally-defined tables. + table_size_sig: Option, + + /// The external function signature for implementing wasm's `memory.grow` + /// for locally-defined memories. + memory_grow_sig: Option, + + /// The external function signature for implementing wasm's `table.grow` + /// for locally-defined tables. + table_grow_sig: Option, + + /// The external function signature for implementing wasm's `table.copy` + /// (it's the same for both local and imported tables). + table_copy_sig: Option, + + /// The external function signature for implementing wasm's `table.init`. + table_init_sig: Option, + + /// The external function signature for implementing wasm's `elem.drop`. + elem_drop_sig: Option, + + /// The external function signature for implementing wasm's `memory.copy` + /// (it's the same for both local and imported memories). + memory_copy_sig: Option, + + /// The external function signature for implementing wasm's `memory.fill` + /// (it's the same for both local and imported memories). + memory_fill_sig: Option, + + /// The external function signature for implementing wasm's `memory.init`. 
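Every `get_*_sig` helper below follows the same memoization idiom: import the signature into the function on first use, cache the resulting `ir::SigRef` in its `Option` field, and return the cached value on later calls. Stripped of the Cranelift types, the pattern is:

```rust
// The shape shared by all the `get_*_sig` helpers (types elided).
fn get_or_import(cache: &mut Option<u32>, import: impl FnOnce() -> u32) -> u32 {
    // `unwrap_or_else` only runs the import closure on the first call...
    let sig = cache.unwrap_or_else(import);
    // ...and storing it back makes every later call hit the cache.
    *cache = Some(sig);
    sig
}
```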
+ memory_init_sig: Option, + + /// The external function signature for implementing wasm's `data.drop`. + data_drop_sig: Option, + + /// The external function signature for implementing wasm's `table.get`. + table_get_sig: Option, + + /// The external function signature for implementing wasm's `table.set`. + table_set_sig: Option, + + /// The external function signature for implementing wasm's `func.ref`. + func_ref_sig: Option, + + /// The external function signature for implementing wasm's `table.fill`. + table_fill_sig: Option, + + /// The external function signature for implementing reference increment for `extern.ref`. + externref_inc_sig: Option, + + /// The external function signature for implementing reference decrement for `extern.ref`. + externref_dec_sig: Option, + /// Offsets to struct fields accessed by JIT code. + offsets: VMOffsets, + + /// The memory styles + memory_styles: &'module_environment PrimaryMap, + + /// The table styles + table_styles: &'module_environment PrimaryMap, +} + +impl<'module_environment> FuncEnvironment<'module_environment> { + pub fn new( + target_config: TargetFrontendConfig, + module: &'module_environment ModuleInfo, + signatures: &'module_environment PrimaryMap, + memory_styles: &'module_environment PrimaryMap, + table_styles: &'module_environment PrimaryMap, + ) -> Self { + Self { + target_config, + module, + signatures, + type_stack: vec![], + vmctx: None, + memory32_size_sig: None, + table_size_sig: None, + memory_grow_sig: None, + table_grow_sig: None, + table_copy_sig: None, + table_init_sig: None, + elem_drop_sig: None, + memory_copy_sig: None, + memory_fill_sig: None, + memory_init_sig: None, + table_get_sig: None, + table_set_sig: None, + data_drop_sig: None, + func_ref_sig: None, + table_fill_sig: None, + externref_inc_sig: None, + externref_dec_sig: None, + offsets: VMOffsets::new(target_config.pointer_bytes()).with_module_info(module), + memory_styles, + table_styles, + } + } + + fn pointer_type(&self) -> ir::Type { + self.target_config.pointer_type() + } + + fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue { + self.vmctx.unwrap_or_else(|| { + let vmctx = func.create_global_value(ir::GlobalValueData::VMContext); + self.vmctx = Some(vmctx); + vmctx + }) + } + + fn get_table_fill_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_fill_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // table index + AbiParam::new(I32), + // dst + AbiParam::new(I32), + // value + AbiParam::new(R64), + // len + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.table_fill_sig = Some(sig); + sig + } + + fn get_table_fill_func( + &mut self, + func: &mut Function, + table_index: TableIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + ( + self.get_table_fill_sig(func), + table_index.index(), + VMBuiltinFunctionIndex::get_table_fill_index(), + ) + } + + fn get_externref_inc_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.externref_inc_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![AbiParam::new(R64)], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.externref_inc_sig = Some(sig); + sig + } + + fn get_externref_inc_func( + &mut self, + func: &mut Function, + ) -> (ir::SigRef, VMBuiltinFunctionIndex) { + ( + self.get_externref_inc_sig(func), + 
VMBuiltinFunctionIndex::get_externref_inc_index(), + ) + } + + fn get_externref_dec_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.externref_dec_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![AbiParam::new(R64)], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.externref_dec_sig = Some(sig); + sig + } + + fn get_externref_dec_func( + &mut self, + func: &mut Function, + ) -> (ir::SigRef, VMBuiltinFunctionIndex) { + ( + self.get_externref_dec_sig(func), + VMBuiltinFunctionIndex::get_externref_dec_index(), + ) + } + + fn get_func_ref_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.func_ref_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + AbiParam::new(I32), + ], + returns: vec![AbiParam::new(R64)], + call_conv: self.target_config.default_call_conv, + }) + }); + self.func_ref_sig = Some(sig); + sig + } + + fn get_func_ref_func( + &mut self, + func: &mut Function, + function_index: FunctionIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + ( + self.get_func_ref_sig(func), + function_index.index(), + VMBuiltinFunctionIndex::get_func_ref_index(), + ) + } + + fn get_table_get_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_get_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + AbiParam::new(I32), + AbiParam::new(I32), + ], + returns: vec![AbiParam::new(R64)], + call_conv: self.target_config.default_call_conv, + }) + }); + self.table_get_sig = Some(sig); + sig + } + + fn get_table_get_func( + &mut self, + func: &mut Function, + table_index: TableIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + if self.module.is_imported_table(table_index) { + ( + self.get_table_get_sig(func), + table_index.index(), + VMBuiltinFunctionIndex::get_imported_table_get_index(), + ) + } else { + ( + self.get_table_get_sig(func), + self.module.local_table_index(table_index).unwrap().index(), + VMBuiltinFunctionIndex::get_table_get_index(), + ) + } + } + + fn get_table_set_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_set_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + AbiParam::new(I32), + AbiParam::new(I32), + AbiParam::new(R64), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.table_set_sig = Some(sig); + sig + } + + fn get_table_set_func( + &mut self, + func: &mut Function, + table_index: TableIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + if self.module.is_imported_table(table_index) { + ( + self.get_table_set_sig(func), + table_index.index(), + VMBuiltinFunctionIndex::get_imported_table_set_index(), + ) + } else { + ( + self.get_table_set_sig(func), + self.module.local_table_index(table_index).unwrap().index(), + VMBuiltinFunctionIndex::get_table_set_index(), + ) + } + } + + fn get_table_grow_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_grow_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // TODO: figure out what the representation of a Wasm value is + AbiParam::new(R64), + AbiParam::new(I32), + AbiParam::new(I32), + ], + returns: vec![AbiParam::new(I32)], 
+ call_conv: self.target_config.default_call_conv, + }) + }); + self.table_grow_sig = Some(sig); + sig + } + + /// Return the table.grow function signature to call for the given index, along with the + /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. + fn get_table_grow_func( + &mut self, + func: &mut Function, + index: TableIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + if self.module.is_imported_table(index) { + ( + self.get_table_grow_sig(func), + index.index(), + VMBuiltinFunctionIndex::get_imported_table_grow_index(), + ) + } else { + ( + self.get_table_grow_sig(func), + self.module.local_table_index(index).unwrap().index(), + VMBuiltinFunctionIndex::get_table_grow_index(), + ) + } + } + + fn get_memory_grow_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.memory_grow_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + AbiParam::new(I32), + AbiParam::new(I32), + ], + returns: vec![AbiParam::new(I32)], + call_conv: self.target_config.default_call_conv, + }) + }); + self.memory_grow_sig = Some(sig); + sig + } + + /// Return the memory.grow function signature to call for the given index, along with the + /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. + fn get_memory_grow_func( + &mut self, + func: &mut Function, + index: MemoryIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + if self.module.is_imported_memory(index) { + ( + self.get_memory_grow_sig(func), + index.index(), + VMBuiltinFunctionIndex::get_imported_memory32_grow_index(), + ) + } else { + ( + self.get_memory_grow_sig(func), + self.module.local_memory_index(index).unwrap().index(), + VMBuiltinFunctionIndex::get_memory32_grow_index(), + ) + } + } + + fn get_table_size_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_size_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + AbiParam::new(I32), + ], + returns: vec![AbiParam::new(I32)], + call_conv: self.target_config.default_call_conv, + }) + }); + self.table_size_sig = Some(sig); + sig + } + + /// Return the memory.size function signature to call for the given index, along with the + /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. + fn get_table_size_func( + &mut self, + func: &mut Function, + index: TableIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + if self.module.is_imported_table(index) { + ( + self.get_table_size_sig(func), + index.index(), + VMBuiltinFunctionIndex::get_imported_table_size_index(), + ) + } else { + ( + self.get_table_size_sig(func), + self.module.local_table_index(index).unwrap().index(), + VMBuiltinFunctionIndex::get_table_size_index(), + ) + } + } + + fn get_memory32_size_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.memory32_size_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + AbiParam::new(I32), + ], + returns: vec![AbiParam::new(I32)], + call_conv: self.target_config.default_call_conv, + }) + }); + self.memory32_size_sig = Some(sig); + sig + } + + /// Return the memory.size function signature to call for the given index, along with the + /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. 
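All of these `get_*_func` helpers dispatch the same way: an imported memory or table keeps its module-level index and targets the `imported_*` builtin, while a locally defined one is first renumbered into the local index space. A minimal model of that renumbering, assuming imports occupy the front of the index space as they do in `ModuleInfo`:

```rust
// Illustrative only: mirrors the behavior of ModuleInfo::local_memory_index.
fn local_index(index: usize, num_imported: usize) -> Option<usize> {
    // None for imported entities (index < num_imported), Some(local) otherwise.
    index.checked_sub(num_imported)
}

fn pick_builtin(index: usize, num_imported: usize) -> (&'static str, usize) {
    match local_index(index, num_imported) {
        Some(local) => ("memory32_grow", local),
        None => ("imported_memory32_grow", index),
    }
}
```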
+ fn get_memory_size_func( + &mut self, + func: &mut Function, + index: MemoryIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + if self.module.is_imported_memory(index) { + ( + self.get_memory32_size_sig(func), + index.index(), + VMBuiltinFunctionIndex::get_imported_memory32_size_index(), + ) + } else { + ( + self.get_memory32_size_sig(func), + self.module.local_memory_index(index).unwrap().index(), + VMBuiltinFunctionIndex::get_memory32_size_index(), + ) + } + } + + fn get_table_copy_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_copy_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Destination table index. + AbiParam::new(I32), + // Source table index. + AbiParam::new(I32), + // Index within destination table. + AbiParam::new(I32), + // Index within source table. + AbiParam::new(I32), + // Number of elements to copy. + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.table_copy_sig = Some(sig); + sig + } + + fn get_table_copy_func( + &mut self, + func: &mut Function, + dst_table_index: TableIndex, + src_table_index: TableIndex, + ) -> (ir::SigRef, usize, usize, VMBuiltinFunctionIndex) { + let sig = self.get_table_copy_sig(func); + ( + sig, + dst_table_index.as_u32() as usize, + src_table_index.as_u32() as usize, + VMBuiltinFunctionIndex::get_table_copy_index(), + ) + } + + fn get_table_init_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.table_init_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Table index. + AbiParam::new(I32), + // Segment index. + AbiParam::new(I32), + // Destination index within table. + AbiParam::new(I32), + // Source index within segment. + AbiParam::new(I32), + // Number of elements to initialize. + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.table_init_sig = Some(sig); + sig + } + + fn get_table_init_func( + &mut self, + func: &mut Function, + table_index: TableIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + let sig = self.get_table_init_sig(func); + let table_index = table_index.as_u32() as usize; + ( + sig, + table_index, + VMBuiltinFunctionIndex::get_table_init_index(), + ) + } + + fn get_elem_drop_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.elem_drop_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Element index. + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.elem_drop_sig = Some(sig); + sig + } + + fn get_elem_drop_func(&mut self, func: &mut Function) -> (ir::SigRef, VMBuiltinFunctionIndex) { + let sig = self.get_elem_drop_sig(func); + (sig, VMBuiltinFunctionIndex::get_elem_drop_index()) + } + + fn get_memory_copy_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.memory_copy_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Memory index. + AbiParam::new(I32), + // Destination address. + AbiParam::new(I32), + // Source address. + AbiParam::new(I32), + // Length. 
+ AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.memory_copy_sig = Some(sig); + sig + } + + fn get_memory_copy_func( + &mut self, + func: &mut Function, + memory_index: MemoryIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + let sig = self.get_memory_copy_sig(func); + if let Some(local_memory_index) = self.module.local_memory_index(memory_index) { + ( + sig, + local_memory_index.index(), + VMBuiltinFunctionIndex::get_memory_copy_index(), + ) + } else { + ( + sig, + memory_index.index(), + VMBuiltinFunctionIndex::get_imported_memory_copy_index(), + ) + } + } + + fn get_memory_fill_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.memory_fill_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Memory index. + AbiParam::new(I32), + // Destination address. + AbiParam::new(I32), + // Value. + AbiParam::new(I32), + // Length. + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.memory_fill_sig = Some(sig); + sig + } + + fn get_memory_fill_func( + &mut self, + func: &mut Function, + memory_index: MemoryIndex, + ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { + let sig = self.get_memory_fill_sig(func); + if let Some(local_memory_index) = self.module.local_memory_index(memory_index) { + ( + sig, + local_memory_index.index(), + VMBuiltinFunctionIndex::get_memory_fill_index(), + ) + } else { + ( + sig, + memory_index.index(), + VMBuiltinFunctionIndex::get_imported_memory_fill_index(), + ) + } + } + + fn get_memory_init_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.memory_init_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Memory index. + AbiParam::new(I32), + // Data index. + AbiParam::new(I32), + // Destination address. + AbiParam::new(I32), + // Source index within the data segment. + AbiParam::new(I32), + // Length. + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.memory_init_sig = Some(sig); + sig + } + + fn get_memory_init_func( + &mut self, + func: &mut Function, + ) -> (ir::SigRef, VMBuiltinFunctionIndex) { + let sig = self.get_memory_init_sig(func); + (sig, VMBuiltinFunctionIndex::get_memory_init_index()) + } + + fn get_data_drop_sig(&mut self, func: &mut Function) -> ir::SigRef { + let sig = self.data_drop_sig.unwrap_or_else(|| { + func.import_signature(Signature { + params: vec![ + AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), + // Data index. + AbiParam::new(I32), + ], + returns: vec![], + call_conv: self.target_config.default_call_conv, + }) + }); + self.data_drop_sig = Some(sig); + sig + } + + fn get_data_drop_func(&mut self, func: &mut Function) -> (ir::SigRef, VMBuiltinFunctionIndex) { + let sig = self.get_data_drop_sig(func); + (sig, VMBuiltinFunctionIndex::get_data_drop_index()) + } + + /// Translates load of builtin function and returns a pair of values `vmctx` + /// and address of the loaded function. + fn translate_load_builtin_function_address( + &mut self, + pos: &mut FuncCursor<'_>, + callee_func_idx: VMBuiltinFunctionIndex, + ) -> (ir::Value, ir::Value) { + // We use an indirect call so that we don't have to patch the code at runtime. 
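Conceptually, the `VMContext` carries a table of host builtin entry points, and the load emitted below is just `base + constant_offset` into that table followed by an indirect call. A rough Rust model (the field name and array size are illustrative; the real layout is computed by `VMOffsets`):

```rust
// Rough model of what VMOffsets::vmctx_builtin_function points into.
#[repr(C)]
struct VmctxBuiltins {
    // ...other VMContext fields precede this array...
    builtin_functions: [*const u8; 32], // one raw entry point per builtin
}

fn builtin_addr(vmctx: &VmctxBuiltins, index: usize) -> *const u8 {
    // The generated IR computes exactly this: base + offset, then a load.
    vmctx.builtin_functions[index]
}
```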
+ let pointer_type = self.pointer_type(); + let vmctx = self.vmctx(&mut pos.func); + let base = pos.ins().global_value(pointer_type, vmctx); + + let mut mem_flags = ir::MemFlags::trusted(); + mem_flags.set_readonly(); + + // Load the callee address. + let body_offset = + i32::try_from(self.offsets.vmctx_builtin_function(callee_func_idx)).unwrap(); + let func_addr = pos.ins().load(pointer_type, mem_flags, base, body_offset); + + (base, func_addr) + } +} + +impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> { + fn target_config(&self) -> TargetFrontendConfig { + self.target_config + } +} + +impl<'module_environment> BaseFuncEnvironment for FuncEnvironment<'module_environment> { + fn is_wasm_parameter(&self, _signature: &ir::Signature, index: usize) -> bool { + // The first parameter is the vmctx. The rest are the wasm parameters. + index >= 1 + } + + fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> WasmResult { + let pointer_type = self.pointer_type(); + + let (ptr, base_offset, current_elements_offset) = { + let vmctx = self.vmctx(func); + if let Some(def_index) = self.module.local_table_index(index) { + let base_offset = + i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap(); + let current_elements_offset = i32::try_from( + self.offsets + .vmctx_vmtable_definition_current_elements(def_index), + ) + .unwrap(); + (vmctx, base_offset, current_elements_offset) + } else { + let from_offset = self.offsets.vmctx_vmtable_import_definition(index); + let table = func.create_global_value(ir::GlobalValueData::Load { + base: vmctx, + offset: Offset32::new(i32::try_from(from_offset).unwrap()), + global_type: pointer_type, + readonly: true, + }); + let base_offset = i32::from(self.offsets.vmtable_definition_base()); + let current_elements_offset = + i32::from(self.offsets.vmtable_definition_current_elements()); + (table, base_offset, current_elements_offset) + } + }; + + let base_gv = func.create_global_value(ir::GlobalValueData::Load { + base: ptr, + offset: Offset32::new(base_offset), + global_type: pointer_type, + readonly: false, + }); + let bound_gv = func.create_global_value(ir::GlobalValueData::Load { + base: ptr, + offset: Offset32::new(current_elements_offset), + global_type: type_of_vmtable_definition_current_elements(&self.offsets), + readonly: false, + }); + + let element_size = match self.table_styles[index] { + TableStyle::CallerChecksSignature => u64::from(self.offsets.size_of_vm_funcref()), + }; + + Ok(func.create_table(ir::TableData { + base_gv, + min_size: Uimm64::new(0), + bound_gv, + element_size: Uimm64::new(element_size), + index_type: I32, + })) + } + + fn translate_table_grow( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor<'_>, + table_index: TableIndex, + _table: ir::Table, + delta: ir::Value, + init_value: ir::Value, + ) -> WasmResult { + let (func_sig, index_arg, func_idx) = self.get_table_grow_func(&mut pos.func, table_index); + let table_index = pos.ins().iconst(I32, index_arg as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + let call_inst = pos.ins().call_indirect( + func_sig, + func_addr, + &[vmctx, init_value, delta, table_index], + ); + Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) + } + + fn translate_table_get( + &mut self, + builder: &mut FunctionBuilder, + table_index: TableIndex, + _table: ir::Table, + index: ir::Value, + ) -> WasmResult { + let mut pos = builder.cursor(); + + let (func_sig, 
table_index_arg, func_idx) = + self.get_table_get_func(&mut pos.func, table_index); + let table_index = pos.ins().iconst(I32, table_index_arg as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + let call_inst = pos + .ins() + .call_indirect(func_sig, func_addr, &[vmctx, table_index, index]); + Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) + } + + fn translate_table_set( + &mut self, + builder: &mut FunctionBuilder, + table_index: TableIndex, + _table: ir::Table, + value: ir::Value, + index: ir::Value, + ) -> WasmResult<()> { + let mut pos = builder.cursor(); + + let (func_sig, table_index_arg, func_idx) = + self.get_table_set_func(&mut pos.func, table_index); + let table_index = pos.ins().iconst(I32, table_index_arg as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + pos.ins() + .call_indirect(func_sig, func_addr, &[vmctx, table_index, index, value]); + Ok(()) + } + + fn translate_table_fill( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor<'_>, + table_index: TableIndex, + dst: ir::Value, + val: ir::Value, + len: ir::Value, + ) -> WasmResult<()> { + let (func_sig, table_index_arg, func_idx) = + self.get_table_fill_func(&mut pos.func, table_index); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + let table_index_arg = pos.ins().iconst(I32, table_index_arg as i64); + pos.ins().call_indirect( + func_sig, + func_addr, + &[vmctx, table_index_arg, dst, val, len], + ); + + Ok(()) + } + + fn translate_externref_inc( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor<'_>, + externref: ir::Value, + ) -> WasmResult<()> { + let (func_sig, func_idx) = self.get_externref_inc_func(&mut pos.func); + let (_vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins().call_indirect(func_sig, func_addr, &[externref]); + + Ok(()) + } + + fn translate_externref_dec( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor<'_>, + externref: ir::Value, + ) -> WasmResult<()> { + let (func_sig, func_idx) = self.get_externref_dec_func(&mut pos.func); + let (_vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins().call_indirect(func_sig, func_addr, &[externref]); + + Ok(()) + } + + fn translate_ref_null( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor, + ty: Type, + ) -> WasmResult { + Ok(match ty { + Type::FuncRef => pos.ins().null(self.reference_type()), + Type::ExternRef => pos.ins().null(self.reference_type()), + _ => { + return Err(WasmError::Unsupported( + "`ref.null T` that is not a `funcref` or an `externref`".into(), + )); + } + }) + } + + fn translate_ref_is_null( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor, + value: ir::Value, + ) -> WasmResult { + let bool_is_null = match pos.func.dfg.value_type(value) { + // `externref` + ty if ty.is_ref() => pos.ins().is_null(value), + // `funcref` + ty if ty == self.pointer_type() => { + pos.ins() + .icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0) + } + _ => unreachable!(), + }; + + Ok(pos.ins().bint(ir::types::I32, bool_is_null)) + } + + fn translate_ref_func( + &mut self, + mut pos: cranelift_codegen::cursor::FuncCursor<'_>, + func_index: FunctionIndex, + ) -> WasmResult { + // TODO: optimize this by storing a pointer to local func_index funcref metadata + // so that local funcref is just (*global + offset) instead of a function call + // + // 
Actually we can do the above for both local and imported functions because + // all of those are known statically. + // + // prototyping with a function call though + + let (func_sig, func_index_arg, func_idx) = + self.get_func_ref_func(&mut pos.func, func_index); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + let func_index_arg = pos.ins().iconst(I32, func_index_arg as i64); + let call_inst = pos + .ins() + .call_indirect(func_sig, func_addr, &[vmctx, func_index_arg]); + + Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) + } + + fn translate_custom_global_get( + &mut self, + mut _pos: cranelift_codegen::cursor::FuncCursor<'_>, + _index: GlobalIndex, + ) -> WasmResult { + unreachable!("we don't make any custom globals") + } + + fn translate_custom_global_set( + &mut self, + mut _pos: cranelift_codegen::cursor::FuncCursor<'_>, + _index: GlobalIndex, + _value: ir::Value, + ) -> WasmResult<()> { + unreachable!("we don't make any custom globals") + } + + fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult { + let pointer_type = self.pointer_type(); + + let (ptr, base_offset, current_length_offset) = { + let vmctx = self.vmctx(func); + if let Some(def_index) = self.module.local_memory_index(index) { + let base_offset = + i32::try_from(self.offsets.vmctx_vmmemory_definition_base(def_index)).unwrap(); + let current_length_offset = i32::try_from( + self.offsets + .vmctx_vmmemory_definition_current_length(def_index), + ) + .unwrap(); + (vmctx, base_offset, current_length_offset) + } else { + let from_offset = self.offsets.vmctx_vmmemory_import_definition(index); + let memory = func.create_global_value(ir::GlobalValueData::Load { + base: vmctx, + offset: Offset32::new(i32::try_from(from_offset).unwrap()), + global_type: pointer_type, + readonly: true, + }); + let base_offset = i32::from(self.offsets.vmmemory_definition_base()); + let current_length_offset = + i32::from(self.offsets.vmmemory_definition_current_length()); + (memory, base_offset, current_length_offset) + } + }; + + // If we have a declared maximum, we can make this a "static" heap, which is + // allocated up front and never moved. 
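The trade-off the following `match` encodes, sketched as plain functions (illustrative only; the real dynamic check is emitted as Cranelift IR, and the static case relies on page protection rather than executing any check):

```rust
fn dynamic_heap_ok(addr: u64, access_size: u64, current_length: u64) -> bool {
    // Dynamic heaps reload `current_length` and bounds-check on each access.
    addr.checked_add(access_size)
        .map_or(false, |end| end <= current_length)
}

fn static_heap_ok(addr: u64, access_size: u64, bound: u64, guard: u64) -> bool {
    // Static heaps reserve `bound + guard` bytes up front; accesses past
    // `bound` land in the guard region and trap via page protection.
    addr.checked_add(access_size)
        .map_or(false, |end| end <= bound.saturating_add(guard))
}
```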
+ let (offset_guard_size, heap_style, readonly_base) = match self.memory_styles[index] { + MemoryStyle::Dynamic { offset_guard_size } => { + let heap_bound = func.create_global_value(ir::GlobalValueData::Load { + base: ptr, + offset: Offset32::new(current_length_offset), + global_type: pointer_type, + readonly: false, + }); + ( + Uimm64::new(offset_guard_size), + ir::HeapStyle::Dynamic { + bound_gv: heap_bound, + }, + false, + ) + } + MemoryStyle::Static { + bound, + offset_guard_size, + } => ( + Uimm64::new(offset_guard_size), + ir::HeapStyle::Static { + bound: Uimm64::new(bound.bytes().0 as u64), + }, + true, + ), + }; + + let heap_base = func.create_global_value(ir::GlobalValueData::Load { + base: ptr, + offset: Offset32::new(base_offset), + global_type: pointer_type, + readonly: readonly_base, + }); + Ok(func.create_heap(ir::HeapData { + base: heap_base, + min_size: 0.into(), + offset_guard_size, + style: heap_style, + index_type: I32, + })) + } + + fn make_global( + &mut self, + func: &mut ir::Function, + index: GlobalIndex, + ) -> WasmResult { + let pointer_type = self.pointer_type(); + + let (ptr, offset) = { + let vmctx = self.vmctx(func); + let from_offset = if let Some(def_index) = self.module.local_global_index(index) { + self.offsets.vmctx_vmglobal_definition(def_index) + } else { + self.offsets.vmctx_vmglobal_import_definition(index) + }; + let global = func.create_global_value(ir::GlobalValueData::Load { + base: vmctx, + offset: Offset32::new(i32::try_from(from_offset).unwrap()), + global_type: pointer_type, + readonly: true, + }); + + (global, 0) + }; + + Ok(GlobalVariable::Memory { + gv: ptr, + offset: offset.into(), + ty: type_to_irtype(self.module.globals[index].ty, self.target_config())?, + }) + } + + fn make_indirect_sig( + &mut self, + func: &mut ir::Function, + index: SignatureIndex, + ) -> WasmResult { + Ok(func.import_signature(self.signatures[index].clone())) + } + + fn make_direct_func( + &mut self, + func: &mut ir::Function, + index: FunctionIndex, + ) -> WasmResult { + let sigidx = self.module.functions[index]; + let signature = func.import_signature(self.signatures[sigidx].clone()); + let name = get_function_name(index); + Ok(func.import_function(ir::ExtFuncData { + name, + signature, + // We currently allocate all code segments independently, so nothing + // is colocated. + colocated: false, + })) + } + + fn translate_call_indirect( + &mut self, + mut pos: FuncCursor<'_>, + table_index: TableIndex, + table: ir::Table, + sig_index: SignatureIndex, + sig_ref: ir::SigRef, + callee: ir::Value, + call_args: &[ir::Value], + ) -> WasmResult { + let pointer_type = self.pointer_type(); + + let table_entry_addr = pos.ins().table_addr(pointer_type, table, callee, 0); + + // Dereference table_entry_addr to get the function address. + let mem_flags = ir::MemFlags::trusted(); + let table_entry_addr = pos.ins().load( + pointer_type, + mem_flags, + table_entry_addr, + i32::from(self.offsets.vm_funcref_anyfunc_ptr()), + ); + + // check if the funcref is null + pos.ins() + .trapz(table_entry_addr, ir::TrapCode::IndirectCallToNull); + + let func_addr = pos.ins().load( + pointer_type, + mem_flags, + table_entry_addr, + i32::from(self.offsets.vmcaller_checked_anyfunc_func_ptr()), + ); + + // If necessary, check the signature. 
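What the emitted check does at runtime, as plain Rust: the caller's shared signature id (loaded from `vmctx`) is compared against the id stored in the callee's anyfunc record, and a mismatch traps. Illustrative; the generated IR traps via `trapz` instead of returning an error:

```rust
fn caller_checks_signature(caller_sig_id: u32, callee_sig_id: u32) -> Result<(), &'static str> {
    if caller_sig_id == callee_sig_id {
        Ok(())
    } else {
        Err("BadSignature") // emitted as a trap on the icmp result below
    }
}
```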
+ match self.table_styles[table_index] { + TableStyle::CallerChecksSignature => { + let sig_id_size = self.offsets.size_of_vmshared_signature_index(); + let sig_id_type = ir::Type::int(u16::from(sig_id_size) * 8).unwrap(); + let vmctx = self.vmctx(pos.func); + let base = pos.ins().global_value(pointer_type, vmctx); + let offset = + i32::try_from(self.offsets.vmctx_vmshared_signature_id(sig_index)).unwrap(); + + // Load the caller ID. + let mut mem_flags = ir::MemFlags::trusted(); + mem_flags.set_readonly(); + let caller_sig_id = pos.ins().load(sig_id_type, mem_flags, base, offset); + + // Load the callee ID. + let mem_flags = ir::MemFlags::trusted(); + let callee_sig_id = pos.ins().load( + sig_id_type, + mem_flags, + table_entry_addr, + i32::from(self.offsets.vmcaller_checked_anyfunc_type_index()), + ); + + // Check that they match. + let cmp = pos.ins().icmp(IntCC::Equal, callee_sig_id, caller_sig_id); + pos.ins().trapz(cmp, ir::TrapCode::BadSignature); + } + } + + let mut real_call_args = Vec::with_capacity(call_args.len() + 2); + + // First append the callee vmctx address. + let vmctx = pos.ins().load( + pointer_type, + mem_flags, + table_entry_addr, + i32::from(self.offsets.vmcaller_checked_anyfunc_vmctx()), + ); + real_call_args.push(vmctx); + + // Then append the regular call arguments. + real_call_args.extend_from_slice(call_args); + + Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args)) + } + + fn translate_call( + &mut self, + mut pos: FuncCursor<'_>, + callee_index: FunctionIndex, + callee: ir::FuncRef, + call_args: &[ir::Value], + ) -> WasmResult { + let mut real_call_args = Vec::with_capacity(call_args.len() + 2); + + // Handle direct calls to locally-defined functions. + if !self.module.is_imported_function(callee_index) { + // Let's get the caller vmctx + let caller_vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap(); + // First append the callee vmctx address, which is the same as the caller vmctx in + // this case. + real_call_args.push(caller_vmctx); + + // Then append the regular call arguments. + real_call_args.extend_from_slice(call_args); + + return Ok(pos.ins().call(callee, &real_call_args)); + } + + // Handle direct calls to imported functions. We use an indirect call + // so that we don't have to patch the code at runtime. + let pointer_type = self.pointer_type(); + let sig_ref = pos.func.dfg.ext_funcs[callee].signature; + let vmctx = self.vmctx(&mut pos.func); + let base = pos.ins().global_value(pointer_type, vmctx); + + let mem_flags = ir::MemFlags::trusted(); + + // Load the callee address. + let body_offset = + i32::try_from(self.offsets.vmctx_vmfunction_import_body(callee_index)).unwrap(); + let func_addr = pos.ins().load(pointer_type, mem_flags, base, body_offset); + + // First append the callee vmctx address. + let vmctx_offset = + i32::try_from(self.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap(); + let vmctx = pos.ins().load(pointer_type, mem_flags, base, vmctx_offset); + real_call_args.push(vmctx); + + // Then append the regular call arguments. 
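The two loads above read a `VMFunctionImport`-style record out of the caller's `vmctx`: one field gives the callee's entry point, the other the callee instance's own `vmctx`, which becomes the first call argument. Roughly (field names and types are illustrative, not the exact `wasmer_vm` definition):

```rust
// Rough model of the import record dereferenced by translate_call.
#[repr(C)]
struct FunctionImportModel {
    body: *const u8, // entry point of the callee's compiled body
    vmctx: *mut u8,  // the callee instance's own VMContext
}
```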
+ real_call_args.extend_from_slice(call_args); + + Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args)) + } + + fn translate_memory_grow( + &mut self, + mut pos: FuncCursor<'_>, + index: MemoryIndex, + _heap: ir::Heap, + val: ir::Value, + ) -> WasmResult { + let (func_sig, index_arg, func_idx) = self.get_memory_grow_func(&mut pos.func, index); + let memory_index = pos.ins().iconst(I32, index_arg as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + let call_inst = pos + .ins() + .call_indirect(func_sig, func_addr, &[vmctx, val, memory_index]); + Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) + } + + fn translate_memory_size( + &mut self, + mut pos: FuncCursor<'_>, + index: MemoryIndex, + _heap: ir::Heap, + ) -> WasmResult { + let (func_sig, index_arg, func_idx) = self.get_memory_size_func(&mut pos.func, index); + let memory_index = pos.ins().iconst(I32, index_arg as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + let call_inst = pos + .ins() + .call_indirect(func_sig, func_addr, &[vmctx, memory_index]); + Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) + } + + fn translate_memory_copy( + &mut self, + mut pos: FuncCursor, + src_index: MemoryIndex, + _src_heap: ir::Heap, + _dst_index: MemoryIndex, + _dst_heap: ir::Heap, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()> { + let (func_sig, src_index, func_idx) = self.get_memory_copy_func(&mut pos.func, src_index); + + let src_index_arg = pos.ins().iconst(I32, src_index as i64); + + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins() + .call_indirect(func_sig, func_addr, &[vmctx, src_index_arg, dst, src, len]); + + Ok(()) + } + + fn translate_memory_fill( + &mut self, + mut pos: FuncCursor, + memory_index: MemoryIndex, + _heap: ir::Heap, + dst: ir::Value, + val: ir::Value, + len: ir::Value, + ) -> WasmResult<()> { + let (func_sig, memory_index, func_idx) = + self.get_memory_fill_func(&mut pos.func, memory_index); + + let memory_index_arg = pos.ins().iconst(I32, memory_index as i64); + + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins().call_indirect( + func_sig, + func_addr, + &[vmctx, memory_index_arg, dst, val, len], + ); + + Ok(()) + } + + fn translate_memory_init( + &mut self, + mut pos: FuncCursor, + memory_index: MemoryIndex, + _heap: ir::Heap, + seg_index: u32, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()> { + let (func_sig, func_idx) = self.get_memory_init_func(&mut pos.func); + + let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64); + let seg_index_arg = pos.ins().iconst(I32, seg_index as i64); + + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins().call_indirect( + func_sig, + func_addr, + &[vmctx, memory_index_arg, seg_index_arg, dst, src, len], + ); + + Ok(()) + } + + fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> { + let (func_sig, func_idx) = self.get_data_drop_func(&mut pos.func); + let seg_index_arg = pos.ins().iconst(I32, seg_index as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + pos.ins() + .call_indirect(func_sig, func_addr, &[vmctx, seg_index_arg]); + Ok(()) + } + + fn translate_table_size( + &mut self, + mut pos: FuncCursor, + table_index: TableIndex, + 
_table: ir::Table, + ) -> WasmResult { + let (func_sig, index_arg, func_idx) = self.get_table_size_func(&mut pos.func, table_index); + let table_index = pos.ins().iconst(I32, index_arg as i64); + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + let call_inst = pos + .ins() + .call_indirect(func_sig, func_addr, &[vmctx, table_index]); + Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) + } + + fn translate_table_copy( + &mut self, + mut pos: FuncCursor, + dst_table_index: TableIndex, + _dst_table: ir::Table, + src_table_index: TableIndex, + _src_table: ir::Table, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()> { + let (func_sig, dst_table_index_arg, src_table_index_arg, func_idx) = + self.get_table_copy_func(&mut pos.func, dst_table_index, src_table_index); + + let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64); + let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64); + + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins().call_indirect( + func_sig, + func_addr, + &[ + vmctx, + dst_table_index_arg, + src_table_index_arg, + dst, + src, + len, + ], + ); + + Ok(()) + } + + fn translate_table_init( + &mut self, + mut pos: FuncCursor, + seg_index: u32, + table_index: TableIndex, + _table: ir::Table, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()> { + let (func_sig, table_index_arg, func_idx) = + self.get_table_init_func(&mut pos.func, table_index); + + let table_index_arg = pos.ins().iconst(I32, table_index_arg as i64); + let seg_index_arg = pos.ins().iconst(I32, seg_index as i64); + + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins().call_indirect( + func_sig, + func_addr, + &[vmctx, table_index_arg, seg_index_arg, dst, src, len], + ); + + Ok(()) + } + + fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> { + let (func_sig, func_idx) = self.get_elem_drop_func(&mut pos.func); + + let elem_index_arg = pos.ins().iconst(I32, elem_index as i64); + + let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); + + pos.ins() + .call_indirect(func_sig, func_addr, &[vmctx, elem_index_arg]); + + Ok(()) + } + + fn translate_atomic_wait( + &mut self, + _pos: FuncCursor, + _index: MemoryIndex, + _heap: ir::Heap, + _addr: ir::Value, + _expected: ir::Value, + _timeout: ir::Value, + ) -> WasmResult { + Err(WasmError::Unsupported( + "wasm atomics (fn translate_atomic_wait)".to_string(), + )) + } + + fn translate_atomic_notify( + &mut self, + _pos: FuncCursor, + _index: MemoryIndex, + _heap: ir::Heap, + _addr: ir::Value, + _count: ir::Value, + ) -> WasmResult { + Err(WasmError::Unsupported( + "wasm atomics (fn translate_atomic_notify)".to_string(), + )) + } + + fn get_global_type(&self, global_index: GlobalIndex) -> Option { + Some(self.module.globals.get(global_index)?.ty) + } + + fn push_local_decl_on_stack(&mut self, ty: WasmerType) { + self.type_stack.push(ty); + } + + fn push_params_on_stack(&mut self, function_index: LocalFunctionIndex) { + let func_index = self.module.func_index(function_index); + let sig_idx = self.module.functions[func_index]; + let signature = &self.module.signatures[sig_idx]; + for param in signature.params() { + self.type_stack.push(*param); + } + } + + fn get_local_type(&self, local_index: u32) -> Option { + self.type_stack.get(local_index as 
usize).cloned()
+    }
+
+    fn get_local_types(&self) -> &[WasmerType] {
+        &self.type_stack
+    }
+
+    fn get_function_type(&self, function_index: FunctionIndex) -> Option<&FunctionType> {
+        let sig_idx = self.module.functions.get(function_index)?;
+        Some(&self.module.signatures[*sig_idx])
+    }
+
+    fn get_function_sig(&self, sig_index: SignatureIndex) -> Option<&FunctionType> {
+        self.module.signatures.get(sig_index)
+    }
+
+    fn translate_drop_locals(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
+        // TODO: this allocation can be removed without too much effort but it will require
+        // maneuvering around the borrow checker
+        for (local_index, local_type) in self.type_stack.to_vec().iter().enumerate() {
+            if *local_type == WasmerType::ExternRef {
+                let val = builder.use_var(Variable::with_u32(local_index as _));
+                self.translate_externref_dec(builder.cursor(), val)?;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/lib/compiler-cranelift/src/lib.rs b/lib/compiler-cranelift/src/lib.rs
new file mode 100644
index 0000000000..b047edca50
--- /dev/null
+++ b/lib/compiler-cranelift/src/lib.rs
@@ -0,0 +1,64 @@
+//! A WebAssembly `Compiler` implementation using Cranelift.
+//!
+//! Cranelift is a fast code generator created by Mozilla, originally for
+//! use in Firefox as the backend of a next-generation JavaScript compiler.
+//!
+//! Compared to LLVM, Cranelift compiles faster and is written entirely in Rust.
+#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)]
+#![warn(unused_import_braces)]
+#![cfg_attr(
+    feature = "cargo-clippy",
+    allow(clippy::new_without_default)
+)]
+#![cfg_attr(
+    feature = "cargo-clippy",
+    warn(
+        clippy::float_arithmetic,
+        clippy::mut_mut,
+        clippy::nonminimal_bool,
+        clippy::option_map_unwrap_or,
+        clippy::option_map_unwrap_or_else,
+        clippy::print_stdout,
+        clippy::unicode_not_nfc,
+        clippy::use_self
+    )
+)]
+
+#[cfg(not(feature = "std"))]
+#[macro_use]
+extern crate alloc as std;
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate std;
+
+#[cfg(not(feature = "std"))]
+use hashbrown::{
+    hash_map,
+    hash_map::Entry::{Occupied, Vacant},
+    HashMap,
+};
+#[cfg(feature = "std")]
+use std::collections::{
+    hash_map,
+    hash_map::Entry::{Occupied, Vacant},
+    HashMap,
+};
+
+mod address_map;
+mod compiler;
+mod config;
+mod debug;
+#[cfg(feature = "unwind")]
+mod dwarf;
+mod func_environ;
+mod sink;
+mod trampoline;
+mod translator;
+
+pub use crate::compiler::CraneliftCompiler;
+pub use crate::config::{Cranelift, CraneliftOptLevel};
+pub use crate::debug::{ModuleInfoMemoryOffset, ModuleInfoVmctxInfo, ValueLabelsRanges};
+pub use crate::trampoline::make_trampoline_function_call;
+
+/// Version number of this crate.
+pub const VERSION: &str = env!("CARGO_PKG_VERSION");
diff --git a/lib/compiler-cranelift/src/sink.rs b/lib/compiler-cranelift/src/sink.rs
new file mode 100644
index 0000000000..7540821649
--- /dev/null
+++ b/lib/compiler-cranelift/src/sink.rs
@@ -0,0 +1,155 @@
+//! Support for compiling with Cranelift.
+ +use crate::translator::{irlibcall_to_libcall, irreloc_to_relocationkind}; +use cranelift_codegen::binemit; +use cranelift_codegen::ir::{self, ExternalName}; +use cranelift_entity::EntityRef as CraneliftEntityRef; +use wasmer_compiler::{JumpTable, Relocation, RelocationTarget, TrapInformation}; +use wasmer_compiler::{RelocationKind, SectionIndex}; +use wasmer_types::entity::EntityRef; +use wasmer_types::{FunctionIndex, LocalFunctionIndex, ModuleInfo}; +use wasmer_vm::TrapCode; + +/// Implementation of a relocation sink that just saves all the information for later +pub(crate) struct RelocSink<'a> { + module: &'a ModuleInfo, + + /// Current function index. + local_func_index: LocalFunctionIndex, + + /// Relocations recorded for the function. + pub func_relocs: Vec, + + /// The section where the probestack trampoline call is located + pub probestack_trampoline_relocation_target: Option, +} + +impl<'a> binemit::RelocSink for RelocSink<'a> { + fn reloc_external( + &mut self, + offset: binemit::CodeOffset, + _source_loc: ir::SourceLoc, + reloc: binemit::Reloc, + name: &ExternalName, + addend: binemit::Addend, + ) { + let reloc_target = if let ExternalName::User { namespace, index } = *name { + debug_assert_eq!(namespace, 0); + RelocationTarget::LocalFunc( + self.module + .local_func_index(FunctionIndex::from_u32(index)) + .expect("The provided function should be local"), + ) + } else if let ExternalName::LibCall(libcall) = *name { + match (libcall, self.probestack_trampoline_relocation_target) { + (ir::LibCall::Probestack, Some(probestack_trampoline_relocation_target)) => { + self.func_relocs.push(Relocation { + kind: RelocationKind::X86CallPCRel4, + reloc_target: RelocationTarget::CustomSection( + probestack_trampoline_relocation_target, + ), + offset: offset, + addend: addend, + }); + // Skip the default path + return; + } + _ => RelocationTarget::LibCall(irlibcall_to_libcall(libcall)), + } + } else { + panic!("unrecognized external name") + }; + self.func_relocs.push(Relocation { + kind: irreloc_to_relocationkind(reloc), + reloc_target, + offset, + addend, + }); + } + + fn reloc_constant( + &mut self, + _code_offset: binemit::CodeOffset, + _reloc: binemit::Reloc, + _constant_offset: ir::ConstantOffset, + ) { + // Do nothing for now: cranelift emits constant data after the function code and also emits + // function code with correct relative offsets to the constant data. + } + + fn reloc_jt(&mut self, offset: binemit::CodeOffset, reloc: binemit::Reloc, jt: ir::JumpTable) { + self.func_relocs.push(Relocation { + kind: irreloc_to_relocationkind(reloc), + reloc_target: RelocationTarget::JumpTable( + self.local_func_index, + JumpTable::new(jt.index()), + ), + offset, + addend: 0, + }); + } +} + +impl<'a> RelocSink<'a> { + /// Return a new `RelocSink` instance. 
+ pub fn new( + module: &'a ModuleInfo, + func_index: FunctionIndex, + probestack_trampoline_relocation_target: Option, + ) -> Self { + let local_func_index = module + .local_func_index(func_index) + .expect("The provided function should be local"); + Self { + module, + local_func_index, + func_relocs: Vec::new(), + probestack_trampoline_relocation_target, + } + } +} + +pub(crate) struct TrapSink { + pub traps: Vec, +} + +impl TrapSink { + pub fn new() -> Self { + Self { traps: Vec::new() } + } +} + +impl binemit::TrapSink for TrapSink { + fn trap( + &mut self, + code_offset: binemit::CodeOffset, + _source_loc: ir::SourceLoc, + trap_code: ir::TrapCode, + ) { + self.traps.push(TrapInformation { + code_offset, + // TODO: Translate properly environment Trapcode into cranelift IR + trap_code: translate_ir_trapcode(trap_code), + }); + } +} + +/// Translates the Cranelift IR TrapCode into generic Trap Code +fn translate_ir_trapcode(trap: ir::TrapCode) -> TrapCode { + match trap { + ir::TrapCode::StackOverflow => TrapCode::StackOverflow, + ir::TrapCode::HeapOutOfBounds => TrapCode::HeapAccessOutOfBounds, + ir::TrapCode::HeapMisaligned => TrapCode::HeapMisaligned, + ir::TrapCode::TableOutOfBounds => TrapCode::TableAccessOutOfBounds, + ir::TrapCode::IndirectCallToNull => TrapCode::IndirectCallToNull, + ir::TrapCode::BadSignature => TrapCode::BadSignature, + ir::TrapCode::IntegerOverflow => TrapCode::IntegerOverflow, + ir::TrapCode::IntegerDivisionByZero => TrapCode::IntegerDivisionByZero, + ir::TrapCode::BadConversionToInteger => TrapCode::BadConversionToInteger, + ir::TrapCode::UnreachableCodeReached => TrapCode::UnreachableCodeReached, + ir::TrapCode::Interrupt => unimplemented!("Interrupts not supported"), + ir::TrapCode::User(_user_code) => unimplemented!("User trap code not supported"), + // ir::TrapCode::Interrupt => TrapCode::Interrupt, + // ir::TrapCode::User(user_code) => TrapCode::User(user_code), + } +} diff --git a/lib/compiler-cranelift/src/trampoline/dynamic_function.rs b/lib/compiler-cranelift/src/trampoline/dynamic_function.rs new file mode 100644 index 0000000000..db9d4633b4 --- /dev/null +++ b/lib/compiler-cranelift/src/trampoline/dynamic_function.rs @@ -0,0 +1,130 @@ +// This file contains code from external sources. +// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md + +//! A trampoline generator for calling dynamic host functions from Wasm. + +use super::binemit::TrampolineRelocSink; +use crate::translator::{compiled_function_unwind_info, signature_to_cranelift_ir}; +use cranelift_codegen::ir::{ + ExternalName, Function, InstBuilder, MemFlags, StackSlotData, StackSlotKind, +}; +use cranelift_codegen::isa::TargetIsa; +use cranelift_codegen::print_errors::pretty_error; +use cranelift_codegen::Context; +use cranelift_codegen::{binemit, ir}; +use std::cmp; +use std::mem; + +use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext}; +use wasmer_compiler::{CompileError, FunctionBody}; +use wasmer_types::FunctionType; +use wasmer_vm::VMOffsets; + +/// Create a trampoline for invoking a WebAssembly function. 
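The trampoline built below bridges a typed Wasm signature to a uniform dynamic-call ABI: arguments are spilled into an on-stack `values_vec` of 128-bit slots, and the host target loaded from `vmdynamicfunction_import_context_address` is invoked as `fn(vmctx, values_vec)`. The host-side shape it calls into is approximately (illustrative signature, not a `wasmer_vm` type):

```rust
// Approximate host-side signature the dynamic trampoline targets.
type DynamicStub = unsafe extern "C" fn(vmctx: *mut u8, values_vec: *mut u128);
```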
+pub fn make_trampoline_dynamic_function( + isa: &dyn TargetIsa, + offsets: &VMOffsets, + fn_builder_ctx: &mut FunctionBuilderContext, + func_type: &FunctionType, +) -> Result { + let pointer_type = isa.pointer_type(); + let frontend_config = isa.frontend_config(); + let signature = signature_to_cranelift_ir(func_type, frontend_config); + let mut stub_sig = ir::Signature::new(frontend_config.default_call_conv); + // Add the caller `vmctx` parameter. + stub_sig.params.push(ir::AbiParam::special( + pointer_type, + ir::ArgumentPurpose::VMContext, + )); + + // Add the `values_vec` parameter. + stub_sig.params.push(ir::AbiParam::new(pointer_type)); + + // Compute the size of the values vector. The vmctx and caller vmctx are passed separately. + let value_size = mem::size_of::(); + let values_vec_len = + (value_size * cmp::max(signature.params.len() - 1, signature.returns.len())) as u32; + + let mut context = Context::new(); + context.func = Function::with_name_signature(ExternalName::user(0, 0), signature.clone()); + + let ss = context.func.create_stack_slot(StackSlotData::new( + StackSlotKind::ExplicitSlot, + values_vec_len, + )); + + { + let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx); + let block0 = builder.create_block(); + + builder.append_block_params_for_function_params(block0); + builder.switch_to_block(block0); + builder.seal_block(block0); + + let values_vec_ptr_val = builder.ins().stack_addr(pointer_type, ss, 0); + let mflags = MemFlags::trusted(); + // We only get the non-vmctx arguments + for i in 1..signature.params.len() { + let val = builder.func.dfg.block_params(block0)[i]; + builder.ins().store( + mflags, + val, + values_vec_ptr_val, + ((i - 1) * value_size) as i32, + ); + } + + let block_params = builder.func.dfg.block_params(block0); + let vmctx_ptr_val = block_params[0]; + let callee_args = vec![vmctx_ptr_val, values_vec_ptr_val]; + + let new_sig = builder.import_signature(stub_sig); + + let mem_flags = ir::MemFlags::trusted(); + let callee_value = builder.ins().load( + pointer_type, + mem_flags, + vmctx_ptr_val, + offsets.vmdynamicfunction_import_context_address() as i32, + ); + + builder + .ins() + .call_indirect(new_sig, callee_value, &callee_args); + + let mflags = MemFlags::trusted(); + let mut results = Vec::new(); + for (i, r) in signature.returns.iter().enumerate() { + let load = builder.ins().load( + r.value_type, + mflags, + values_vec_ptr_val, + (i * value_size) as i32, + ); + results.push(load); + } + builder.ins().return_(&results); + builder.finalize() + } + + let mut code_buf = Vec::new(); + let mut reloc_sink = TrampolineRelocSink {}; + let mut trap_sink = binemit::NullTrapSink {}; + let mut stackmap_sink = binemit::NullStackMapSink {}; + context + .compile_and_emit( + isa, + &mut code_buf, + &mut reloc_sink, + &mut trap_sink, + &mut stackmap_sink, + ) + .map_err(|error| CompileError::Codegen(pretty_error(&context.func, Some(isa), error)))?; + + let unwind_info = compiled_function_unwind_info(isa, &context)?.maybe_into_to_windows_unwind(); + + Ok(FunctionBody { + body: code_buf, + unwind_info, + }) +} diff --git a/lib/compiler-cranelift/src/trampoline/function_call.rs b/lib/compiler-cranelift/src/trampoline/function_call.rs new file mode 100644 index 0000000000..876bc8612d --- /dev/null +++ b/lib/compiler-cranelift/src/trampoline/function_call.rs @@ -0,0 +1,130 @@ +// This file contains code from external sources. +// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md + +//! 
diff --git a/lib/compiler-cranelift/src/trampoline/function_call.rs b/lib/compiler-cranelift/src/trampoline/function_call.rs
new file mode 100644
index 0000000000..876bc8612d
--- /dev/null
+++ b/lib/compiler-cranelift/src/trampoline/function_call.rs
@@ -0,0 +1,130 @@
+// This file contains code from external sources.
+// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md
+
+//! A trampoline generator for calling Wasm functions easily.
+//!
+//! That way, you can call Wasm functions with code like:
+//! ```ignore
+//! let my_func = instance.exports.get("func");
+//! my_func.call([1, 2])
+//! ```
+use super::binemit::TrampolineRelocSink;
+use crate::translator::{
+    compiled_function_unwind_info, signature_to_cranelift_ir, /*transform_jump_table, */
+};
+use cranelift_codegen::ir::InstBuilder;
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::print_errors::pretty_error;
+use cranelift_codegen::Context;
+use cranelift_codegen::{binemit, ir};
+use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
+use std::mem;
+use wasmer_compiler::{CompileError, FunctionBody};
+use wasmer_types::FunctionType;
+
+/// Create a trampoline for invoking a WebAssembly function.
+pub fn make_trampoline_function_call(
+    isa: &dyn TargetIsa,
+    fn_builder_ctx: &mut FunctionBuilderContext,
+    func_type: &FunctionType,
+) -> Result<FunctionBody, CompileError> {
+    let pointer_type = isa.pointer_type();
+    let frontend_config = isa.frontend_config();
+    let signature = signature_to_cranelift_ir(func_type, frontend_config);
+    let mut wrapper_sig = ir::Signature::new(frontend_config.default_call_conv);
+
+    // Add the callee `vmctx` parameter.
+    wrapper_sig.params.push(ir::AbiParam::special(
+        pointer_type,
+        ir::ArgumentPurpose::VMContext,
+    ));
+
+    // Add the `callee_address` parameter.
+    wrapper_sig.params.push(ir::AbiParam::new(pointer_type));
+
+    // Add the `values_vec` parameter.
+    wrapper_sig.params.push(ir::AbiParam::new(pointer_type));
+
+    let mut context = Context::new();
+    context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
+
+    let value_size = mem::size_of::<u128>();
+    {
+        let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
+        let block0 = builder.create_block();
+
+        builder.append_block_params_for_function_params(block0);
+        builder.switch_to_block(block0);
+        builder.seal_block(block0);
+
+        let (vmctx_ptr_val, callee_value, values_vec_ptr_val) = {
+            let params = builder.func.dfg.block_params(block0);
+            (params[0], params[1], params[2])
+        };
+
+        // Load the argument values out of `values_vec`.
+        let mflags = ir::MemFlags::trusted();
+        let callee_args = signature
+            .params
+            .iter()
+            .enumerate()
+            .map(|(i, r)| {
+                match i {
+                    0 => vmctx_ptr_val,
+                    _ =>
+                    // i - 1 because vmctx is not passed through `values_vec`.
+                    {
+                        builder.ins().load(
+                            r.value_type,
+                            mflags,
+                            values_vec_ptr_val,
+                            ((i - 1) * value_size) as i32,
+                        )
+                    }
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let new_sig = builder.import_signature(signature);
+
+        let call = builder
+            .ins()
+            .call_indirect(new_sig, callee_value, &callee_args);
+
+        let results = builder.func.dfg.inst_results(call).to_vec();
+
+        // Store the return values into `values_vec`.
+        let mflags = ir::MemFlags::trusted();
+        for (i, r) in results.iter().enumerate() {
+            builder
+                .ins()
+                .store(mflags, *r, values_vec_ptr_val, (i * value_size) as i32);
+        }
+
+        builder.ins().return_(&[]);
+        builder.finalize()
+    }
+
+    let mut code_buf = Vec::new();
+    let mut reloc_sink = TrampolineRelocSink {};
+    let mut trap_sink = binemit::NullTrapSink {};
+    let mut stackmap_sink = binemit::NullStackMapSink {};
+
+    context
+        .compile_and_emit(
+            isa,
+            &mut code_buf,
+            &mut reloc_sink,
+            &mut trap_sink,
+            &mut stackmap_sink,
+        )
+        .map_err(|error| CompileError::Codegen(pretty_error(&context.func, Some(isa), error)))?;
+
+    let unwind_info = compiled_function_unwind_info(isa, &context)?.maybe_into_to_windows_unwind();
+
+    Ok(FunctionBody {
+        body: code_buf,
+        unwind_info,
+        // jt_offsets: transform_jump_table(context.func.jt_offsets),
+    })
+}
diff --git a/lib/compiler-cranelift/src/trampoline/mod.rs b/lib/compiler-cranelift/src/trampoline/mod.rs
new file mode 100644
index 0000000000..90039c190f
--- /dev/null
+++ b/lib/compiler-cranelift/src/trampoline/mod.rs
@@ -0,0 +1,51 @@
+#![allow(missing_docs)]
+
+mod dynamic_function;
+mod function_call;
+
+pub use self::dynamic_function::make_trampoline_dynamic_function;
+pub use self::function_call::make_trampoline_function_call;
+
+pub use cranelift_codegen::print_errors::pretty_error;
+pub use cranelift_codegen::Context;
+pub use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
+
+pub mod binemit {
+    pub use cranelift_codegen::binemit::NullTrapSink;
+    pub use cranelift_codegen::binemit::{CodeOffset, NullStackMapSink, TrapSink};
+
+    use cranelift_codegen::{binemit, ir};
+
+    /// We don't expect trampoline compilation to produce any relocations, so
+    /// this `RelocSink` just asserts that it doesn't receive any.
+    pub struct TrampolineRelocSink {}
+
+    impl binemit::RelocSink for TrampolineRelocSink {
+        fn reloc_external(
+            &mut self,
+            _offset: binemit::CodeOffset,
+            _source_loc: ir::SourceLoc,
+            _reloc: binemit::Reloc,
+            _name: &ir::ExternalName,
+            _addend: binemit::Addend,
+        ) {
+            panic!("trampoline compilation should not produce external symbol relocs");
+        }
+        fn reloc_constant(
+            &mut self,
+            _code_offset: binemit::CodeOffset,
+            _reloc: binemit::Reloc,
+            _constant_offset: ir::ConstantOffset,
+        ) {
+            panic!("trampoline compilation should not produce constant relocs");
+        }
+        fn reloc_jt(
+            &mut self,
+            _offset: binemit::CodeOffset,
+            _reloc: binemit::Reloc,
+            _jt: ir::JumpTable,
+        ) {
+            panic!("trampoline compilation should not produce jump table relocs");
+        }
+    }
+}
diff --git a/lib/compiler-cranelift/src/translator/code_translator.rs b/lib/compiler-cranelift/src/translator/code_translator.rs
new file mode 100644
index 0000000000..0173848653
--- /dev/null
+++ b/lib/compiler-cranelift/src/translator/code_translator.rs
@@ -0,0 +1,2982 @@
+// This file contains code from external sources.
+// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md
+
+//! This module contains the bulk of the interesting code performing the translation between
+//! WebAssembly bytecode and Cranelift IR.
+//!
+//! The translation is done in one pass, opcode by opcode. Two main data structures are used during
+//! code translation: the value stack and the control stack. The value stack mimics the execution
+//! of the WebAssembly stack machine: each instruction result is pushed onto the stack and
+//! instruction arguments are popped off the stack. Similarly, when encountering a control flow
+//! block, it is pushed onto the control stack and popped off when encountering the corresponding
+//! `End`.
+//!
+//! Another data structure, the translation state, records whether the current code is unreachable
+//! and whether a return must be inserted at the end of the function.
+//!
+//! Some of the WebAssembly instructions need information about the environment for which they
+//! are being translated:
+//!
+//! - the loads and stores need the memory base address;
+//! - the `get_global` and `set_global` instructions depend on how the globals are implemented;
+//! - `memory.size` and `memory.grow` are runtime functions;
+//! - `call_indirect` has to translate the function index into the address of the function it
+//!   refers to;
+//!
+//! That is why `translate_function_body` takes an object having the `WasmRuntime` trait as
+//! argument.
+//!
+//! There is extra complexity associated with translation of 128-bit SIMD instructions.
+//! Wasm only considers there to be a single 128-bit vector type. But CLIF's type system
+//! distinguishes different lane configurations, and considers 8X16, 16X8, 32X4 and 64X2 to be
+//! different types. The result is that, in wasm, it's perfectly OK to take the output of (eg)
+//! an `add.16x8` and use that as an operand of a `sub.32x4`, without using any cast. But when
+//! translated into CLIF, that will cause a verifier error due to the apparent type mismatch.
+//!
+//! This file works around that problem by liberally inserting `bitcast` instructions in many
+//! places -- mostly, before the use of vector values, either as arguments to CLIF instructions
+//! or as block actual parameters. These are no-op casts which nevertheless have different
+//! input and output types, and are used (mostly) to "convert" 16X8, 32X4 and 64X2-typed vectors
+//! to the "canonical" type, 8X16. Hence the functions `optionally_bitcast_vector`,
+//! `bitcast_arguments`, `pop*_with_bitcast`, `canonicalise_then_jump`,
+//! `canonicalise_then_br{z,nz}`, `is_non_canonical_v128` and `canonicalise_v128_values`.
+//! Note that the `bitcast*` functions are occasionally used to convert to some type other than
+//! 8X16, but the `canonicalise*` functions always convert to type 8X16.
+//!
+//! Be careful when adding support for new vector instructions, and when adding new jumps, even
+//! if they apparently have no connection to vectors. Never generate any kind of
+//! (inter-block) jump directly. Instead use `canonicalise_then_jump` and
+//! `canonicalise_then_br{z,nz}`.
+//!
+//! The use of bitcasts is ugly and inefficient, but currently unavoidable:
+//!
+//! * they make the logic in this file fragile: omit a bitcast for any reason, and there is
+//!   the risk of the system failing in the verifier. At least for debug builds.
+//!
+//! * in the new backends, they potentially interfere with pattern matching on CLIF -- the
+//!   patterns need to take into account the presence of bitcast nodes.
+//!
+//! * in the new backends, they get translated into machine-level vector-register-copy
+//!   instructions, none of which are actually necessary. We then depend on the register
+//!   allocator to coalesce them all out.
+//!
+//! * they increase the total number of CLIF nodes that have to be processed, hence slowing down
+//!   the compilation pipeline. Also, the extra coalescing work generates a slowdown.
+//!
+//! A better solution, which would avoid all four problems, would be to remove the 8X16, 16X8,
+//! 32X4 and 64X2 types from CLIF and instead have a single V128 type.
+//!
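To make the doc comment's point concrete: a vector bitcast reinterprets the same 128 bits under a different lane shape, and nothing more. A standalone illustration in ordinary Rust (not Cranelift code):

```rust
fn main() {
    // The "same" v128 value, first viewed as eight 16-bit lanes...
    let as_16x8: [u16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];

    // ...then reinterpreted as the canonical sixteen 8-bit lanes.
    let mut as_8x16 = [0u8; 16];
    for (i, lane) in as_16x8.iter().enumerate() {
        as_8x16[2 * i..2 * i + 2].copy_from_slice(&lane.to_le_bytes());
    }

    // No bits changed; only the lane boundaries moved.
    assert_eq!(as_8x16[..4], [1, 0, 2, 0]);
}
```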
+//! For further background see also:
+//!
+//!     ("Too many raw_bitcasts in SIMD code")
+//!
+//!     ("Add X128 type to represent WebAssembly's V128 type")
+//!
+//!     ("Relax verification to allow I8X16 to act as a default vector type")
+
+use super::func_environ::{FuncEnvironment, GlobalVariable, ReturnMode};
+use super::func_state::{ControlStackFrame, ElseData, FuncTranslationState, ValueExtraInfo};
+use super::translation_utils::{block_with_params, f32_translation, f64_translation};
+use crate::{hash_map, HashMap};
+use core::cmp;
+use core::convert::TryFrom;
+use core::{i32, u32};
+use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+use cranelift_codegen::ir::immediates::Offset32;
+use cranelift_codegen::ir::types::*;
+use cranelift_codegen::ir::{
+    self, AtomicRmwOp, ConstantData, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel,
+};
+use cranelift_codegen::packed_option::ReservedValue;
+use cranelift_frontend::{FunctionBuilder, Variable};
+use smallvec::SmallVec;
+use std::vec::Vec;
+
+use wasmer_compiler::wasmparser::{MemoryImmediate, Operator, Type as WPType};
+use wasmer_compiler::WasmResult;
+use wasmer_compiler::{wasm_unsupported, ModuleTranslationState};
+use wasmer_types::{
+    FunctionIndex, GlobalIndex, MemoryIndex, SignatureIndex, TableIndex, Type as WasmerType,
+};
+
+// Clippy warns about "align: _" but it's important to document that the align field is ignored
+#[cfg_attr(
+    feature = "cargo-clippy",
+    allow(clippy::unneeded_field_pattern, clippy::cognitive_complexity)
+)]
+/// Translates wasm operators into Cranelift IR instructions.
+pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
+    module_translation_state: &ModuleTranslationState,
+    op: &Operator,
+    builder: &mut FunctionBuilder,
+    state: &mut FuncTranslationState,
+    environ: &mut FE,
+) -> WasmResult<()> {
+    if !state.reachable {
+        translate_unreachable_operator(module_translation_state, &op, builder, state, environ)?;
+        return Ok(());
+    }
+
+    // This big match treats all Wasm code operators.
+    match op {
+        /********************************** Locals ****************************************
+         * `get_local` and `set_local` are treated as non-SSA variables and will completely
+         * disappear in the Cranelift code.
+         ***********************************************************************************/
+        Operator::LocalGet { local_index } => {
+            let val = builder.use_var(Variable::with_u32(*local_index));
+            let local_type = environ.get_local_type(*local_index).unwrap();
+            let ref_counted = local_type == WasmerType::ExternRef;
+            state.push1_extra((
+                val,
+                ValueExtraInfo {
+                    ref_counted,
+                    ..Default::default()
+                },
+            ));
+            let label = ValueLabel::from_u32(*local_index);
+            builder.set_val_label(val, label);
+
+            if ref_counted {
+                environ.translate_externref_inc(builder.cursor(), val)?;
+            }
+        }
+        Operator::LocalSet { local_index } => {
+            let (mut val, _metadata) = state.pop1();
+
+            let local_type = environ.get_local_type(*local_index).unwrap();
+            if local_type == WasmerType::ExternRef {
+                debug_assert!(_metadata.ref_counted);
+                let existing_val = builder.use_var(Variable::with_u32(*local_index));
+                environ.translate_externref_dec(builder.cursor(), existing_val)?;
+            }
+            // Ensure SIMD values are cast to their default Cranelift type, I8x16.
+ let ty = builder.func.dfg.value_type(val); + if ty.is_vector() { + val = optionally_bitcast_vector(val, I8X16, builder); + } + + builder.def_var(Variable::with_u32(*local_index), val); + let label = ValueLabel::from_u32(*local_index); + builder.set_val_label(val, label); + } + Operator::LocalTee { local_index } => { + let (mut val, _metadata) = state.peek1(); + + // ref count if we need to + let local_type = environ.get_local_type(*local_index).unwrap(); + if local_type == WasmerType::ExternRef { + debug_assert!(_metadata.ref_counted); + let existing_val = builder.use_var(Variable::with_u32(*local_index)); + environ.translate_externref_dec(builder.cursor(), existing_val)?; + environ.translate_externref_inc(builder.cursor(), val)?; + } + // Ensure SIMD values are cast to their default Cranelift type, I8x16. + let ty = builder.func.dfg.value_type(val); + if ty.is_vector() { + val = optionally_bitcast_vector(val, I8X16, builder); + } + + builder.def_var(Variable::with_u32(*local_index), val); + let label = ValueLabel::from_u32(*local_index); + builder.set_val_label(val, label); + } + /********************************** Globals **************************************** + * `get_global` and `set_global` are handled by the environment. + ***********************************************************************************/ + Operator::GlobalGet { global_index } => { + let global_index = GlobalIndex::from_u32(*global_index); + let stack_elem = match state.get_global(builder.func, global_index.as_u32(), environ)? { + GlobalVariable::Const(val) => (val, ValueExtraInfo::default()), + GlobalVariable::Memory { gv, offset, ty } => { + let global_type = environ.get_global_type(global_index).unwrap(); + let addr = builder.ins().global_value(environ.pointer_type(), gv); + let flags = ir::MemFlags::trusted(); + let value = builder.ins().load(ty, flags, addr, offset); + let ref_counted = global_type == WasmerType::ExternRef; + if ref_counted { + environ.translate_externref_inc(builder.cursor(), value)?; + } + + ( + value, + ValueExtraInfo { + ref_counted, + ..Default::default() + }, + ) + } + GlobalVariable::Custom => ( + environ.translate_custom_global_get(builder.cursor(), global_index)?, + ValueExtraInfo::default(), + ), + }; + state.push1_extra(stack_elem); + } + Operator::GlobalSet { global_index } => { + let global_index = GlobalIndex::from_u32(*global_index); + match state.get_global(builder.func, global_index.as_u32(), environ)? { + GlobalVariable::Const(_) => { + panic!("global #{} is a constant", global_index.as_u32()) + } + GlobalVariable::Memory { gv, offset, ty } => { + let global_type = environ.get_global_type(global_index).unwrap(); + let addr = builder.ins().global_value(environ.pointer_type(), gv); + let flags = ir::MemFlags::trusted(); + let (mut val, _) = state.pop1(); + // Ensure SIMD values are cast to their default Cranelift type, I8x16. + if ty.is_vector() { + val = optionally_bitcast_vector(val, I8X16, builder); + } + debug_assert_eq!(ty, builder.func.dfg.value_type(val)); + if global_type == WasmerType::ExternRef { + let value = builder.ins().load(ty, flags, addr, offset); + environ.translate_externref_dec(builder.cursor(), value)?; + } + builder.ins().store(flags, val, addr, offset); + } + GlobalVariable::Custom => { + let (val, _) = state.pop1(); + environ.translate_custom_global_set(builder.cursor(), global_index, val)?; + } + } + } + /********************************* Stack misc *************************************** + * `drop`, `nop`, `unreachable` and `select`. 
+         ***********************************************************************************/
+        Operator::Drop => {
+            let (val, metadata) = state.pop1();
+            if metadata.ref_counted {
+                environ.translate_externref_dec(builder.cursor(), val)?;
+            }
+        }
+        Operator::Select => {
+            // We can ignore the metadata here because externref must use TypedSelect.
+            let ((arg1, _), (arg2, _), (cond, _)) = state.pop3();
+            state.push1(builder.ins().select(cond, arg1, arg2));
+        }
+        Operator::TypedSelect { ty } => {
+            let ((arg1, _), (arg2, _), (cond, _)) = state.pop3();
+            let ref_counted = *ty == WPType::ExternRef;
+            if ref_counted {
+                let selected_ref = builder.ins().select(cond, arg1, arg2);
+                let not_selected_ref = builder.ins().select(cond, arg2, arg1);
+                state.push1_extra((
+                    selected_ref,
+                    ValueExtraInfo {
+                        ref_counted,
+                        ..Default::default()
+                    },
+                ));
+                environ.translate_externref_dec(builder.cursor(), not_selected_ref)?;
+            } else {
+                state.push1_extra((
+                    builder.ins().select(cond, arg1, arg2),
+                    ValueExtraInfo::default(),
+                ));
+            }
+        }
+        Operator::Nop => {
+            // We do nothing.
+        }
+        Operator::Unreachable => {
+            builder.ins().trap(ir::TrapCode::UnreachableCodeReached);
+            state.reachable = false;
+        }
+        /***************************** Control flow blocks **********************************
+         * When starting a control flow block, we create a new `Block` that will hold the code
+         * after the block, and we push a frame on the control stack. Depending on the type
+         * of block, we create a new `Block` for the body of the block with an associated
+         * jump instruction.
+         *
+         * The `End` instruction pops the last control frame from the control stack, seals
+         * the destination block (since `br` instructions targeting it only appear inside the
+         * block and have already been translated) and modifies the value stack to use the
+         * destination `Block`'s argument values.
+         ***********************************************************************************/
+        Operator::Block { ty } => {
+            let (params, results) = module_translation_state.blocktype_params_results(*ty)?;
+            let next = block_with_params(builder, results, environ)?;
+            state.push_block(next, params.len(), results.len());
+        }
+        Operator::Loop { ty } => {
+            let (params, results) = module_translation_state.blocktype_params_results(*ty)?;
+            let loop_body = block_with_params(builder, params, environ)?;
+            let next = block_with_params(builder, results, environ)?;
+            canonicalise_then_jump(builder, loop_body, state.peekn(params.len()));
+            state.push_loop(loop_body, next, params.len(), results.len());
+
+            // Pop the initial `Block` actuals and replace them with the `Block`'s
+            // params since control flow joins at the top of the loop.
+            state.popn(params.len());
+            state
+                .stack
+                .extend_from_slice(builder.block_params(loop_body));
+
+            builder.switch_to_block(loop_body);
+            environ.translate_loop_header(builder.cursor())?;
+        }
+        Operator::If { ty } => {
+            let (val, _) = state.pop1();
+
+            let (params, results) = module_translation_state.blocktype_params_results(*ty)?;
+            let (destination, else_data) = if params == results {
+                // It is possible there is no `else` block, so we will only
+                // allocate a block for it if/when we find the `else`. For now,
+                // if the condition isn't true, we jump directly to the
+                // destination block following the whole `if...end`. If we do end
+                // up discovering an `else`, then we will allocate a block for it
+                // and go back and patch the jump.
+                let destination = block_with_params(builder, results, environ)?;
+                let branch_inst =
+                    canonicalise_then_brz(builder, val, destination, state.peekn(params.len()));
+                (destination, ElseData::NoElse { branch_inst })
+            } else {
+                // The `if` type signature is not valid without an `else` block,
+                // so we eagerly allocate the `else` block here.
+                let destination = block_with_params(builder, results, environ)?;
+                let else_block = block_with_params(builder, params, environ)?;
+                canonicalise_then_brz(builder, val, else_block, state.peekn(params.len()));
+                builder.seal_block(else_block);
+                (destination, ElseData::WithElse { else_block })
+            };
+
+            let next_block = builder.create_block();
+            canonicalise_then_jump(builder, next_block, (&[], &[]));
+            builder.seal_block(next_block); // Only predecessor is the current block.
+            builder.switch_to_block(next_block);
+
+            // Here we append an argument to a Block targeted by an argumentless jump instruction.
+            // But in fact there are two cases:
+            // - either the If does not have an Else clause, in which case ty = EmptyBlock
+            //   and we add nothing;
+            // - or the If has an Else clause, in which case the destination of this jump
+            //   instruction will be changed later when we translate the Else operator.
+            state.push_if(destination, else_data, params.len(), results.len(), *ty);
+        }
+        Operator::Else => {
+            let i = state.control_stack.len() - 1;
+            match state.control_stack[i] {
+                ControlStackFrame::If {
+                    ref else_data,
+                    head_is_reachable,
+                    ref mut consequent_ends_reachable,
+                    num_return_values,
+                    blocktype,
+                    destination,
+                    ..
+                } => {
+                    // We finished the consequent, so record its final
+                    // reachability state.
+                    debug_assert!(consequent_ends_reachable.is_none());
+                    *consequent_ends_reachable = Some(state.reachable);
+
+                    if head_is_reachable {
+                        // We have a branch from the head of the `if` to the `else`.
+                        state.reachable = true;
+
+                        // Ensure we have a block for the `else` block (it may have
+                        // already been pre-allocated, see `ElseData` for details).
+                        let else_block = match *else_data {
+                            ElseData::NoElse { branch_inst } => {
+                                let (params, _results) =
+                                    module_translation_state.blocktype_params_results(blocktype)?;
+                                debug_assert_eq!(params.len(), num_return_values);
+                                let else_block = block_with_params(builder, params, environ)?;
+                                canonicalise_then_jump(
+                                    builder,
+                                    destination,
+                                    state.peekn(params.len()),
+                                );
+                                state.popn(params.len());
+
+                                builder.change_jump_destination(branch_inst, else_block);
+                                builder.seal_block(else_block);
+                                else_block
+                            }
+                            ElseData::WithElse { else_block } => {
+                                canonicalise_then_jump(
+                                    builder,
+                                    destination,
+                                    state.peekn(num_return_values),
+                                );
+                                state.popn(num_return_values);
+                                else_block
+                            }
+                        };
+
+                        // You might be expecting that we push the parameters for this
+                        // `else` block here, something like this:
+                        //
+                        //     state.pushn(&control_stack_frame.params);
+                        //
+                        // We don't do that because they are already on the top of the stack
+                        // for us: we pushed the parameters twice when we saw the initial
+                        // `if` so that we wouldn't have to save the parameters in the
+                        // `ControlStackFrame` as another `Vec` allocation.
+
+                        builder.switch_to_block(else_block);
+
+                        // We don't bother updating the control frame's `ElseData`
+                        // to `WithElse` because nothing else will read it.
+                    }
+                }
+                _ => unreachable!(),
+            }
+        }
+        Operator::End => {
+            let frame = state.control_stack.pop().unwrap();
+            let next_block = frame.following_code();
+
+            if !builder.is_unreachable() || !builder.is_pristine() {
+                let return_count = frame.num_return_values();
+                let return_args = state.peekn(return_count);
+                canonicalise_then_jump(builder, frame.following_code(), return_args);
+                // You might expect that if we just finished an `if` block that
+                // didn't have a corresponding `else` block, then we would clean
+                // up our duplicate set of parameters that we pushed earlier
+                // right here. However, we don't have to explicitly do that,
+                // since we truncate the stack back to the original height
+                // below.
+            }
+
+            builder.switch_to_block(next_block);
+            builder.seal_block(next_block);
+
+            // If it is a loop we also have to seal the body loop block.
+            if let ControlStackFrame::Loop { header, .. } = frame {
+                builder.seal_block(header)
+            }
+
+            frame.truncate_value_stack_to_original_size(&mut state.stack);
+            state
+                .stack
+                .extend_from_slice(builder.block_params(next_block));
+        }
+        /**************************** Branch instructions *********************************
+         * The branch instructions all have as arguments a target nesting level, which
+         * corresponds to how many control stack frames we have to pop to get the
+         * destination `Block`.
+         *
+         * Once the destination `Block` is found, we sometimes have to declare a certain depth
+         * of the stack unreachable, because some branch instructions are terminators.
+         *
+         * The `br_table` case is much more complicated because Cranelift's `br_table` instruction
+         * does not support jump arguments like all the other branch instructions. That is why, in
+         * the case where we would use jump arguments for every other branch instruction, we
+         * need to split the critical edges leaving the `br_table` by creating one `Block` per
+         * table destination; the `br_table` will point to these newly created `Block`s, and these
+         * `Block`s contain only a jump instruction pointing to the final destination, this time with
+         * jump arguments.
+         *
+         * This system is also implemented in Cranelift's SSA construction algorithm, because
+         * `use_var` located in a destination `Block` of a `br_table` might trigger the addition
+         * of jump arguments in each predecessor branch instruction, one of which might be a
+         * `br_table`.
+         ***********************************************************************************/
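The edge-splitting bookkeeping described in the comment above can be sketched on its own: one trampoline block per *distinct* target depth, reused across duplicate table entries, exactly as `dest_block_map` does below (simplified, hypothetical types):

```rust
use std::collections::HashMap;

fn main() {
    // br_table entries by relative depth; depth 0 appears twice.
    let depths = [0usize, 1, 0, 2];
    let mut block_for_depth = HashMap::new();
    let mut next_block = 0u32;

    // Allocate one edge block per distinct destination and reuse it
    // for duplicate table entries.
    let table: Vec<u32> = depths
        .iter()
        .map(|d| {
            *block_for_depth.entry(*d).or_insert_with(|| {
                let b = next_block; // a fresh edge block
                next_block += 1;
                b
            })
        })
        .collect();

    assert_eq!(table, [0, 1, 0, 2]);
    assert_eq!(block_for_depth.len(), 3); // only three real destinations
}
```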
+        Operator::Br { relative_depth } => {
+            let i = state.control_stack.len() - 1 - (*relative_depth as usize);
+            let (return_count, br_destination) = {
+                let frame = &mut state.control_stack[i];
+                // We signal that all the code that follows until the next `End` is unreachable.
+                frame.set_branched_to_exit();
+                let return_count = if frame.is_loop() {
+                    frame.num_param_values()
+                } else {
+                    frame.num_return_values()
+                };
+                (return_count, frame.br_destination())
+            };
+            let destination_args = state.peekn(return_count);
+            canonicalise_then_jump(builder, br_destination, destination_args);
+            state.popn(return_count);
+            state.reachable = false;
+        }
+        Operator::BrIf { relative_depth } => translate_br_if(*relative_depth, builder, state),
+        Operator::BrTable { table } => {
+            let mut depths = table.targets().collect::<Result<Vec<_>, _>>()?;
+            let default = depths.pop().unwrap().0;
+            let mut min_depth = default;
+            for (depth, _) in depths.iter() {
+                if *depth < min_depth {
+                    min_depth = *depth;
+                }
+            }
+            let jump_args_count = {
+                let i = state.control_stack.len() - 1 - (min_depth as usize);
+                let min_depth_frame = &state.control_stack[i];
+                if min_depth_frame.is_loop() {
+                    min_depth_frame.num_param_values()
+                } else {
+                    min_depth_frame.num_return_values()
+                }
+            };
+            let (val, _) = state.pop1();
+            let mut data = JumpTableData::with_capacity(depths.len());
+            if jump_args_count == 0 {
+                // No jump arguments.
+                for (depth, _) in depths.iter() {
+                    let block = {
+                        let i = state.control_stack.len() - 1 - (*depth as usize);
+                        let frame = &mut state.control_stack[i];
+                        frame.set_branched_to_exit();
+                        frame.br_destination()
+                    };
+                    data.push_entry(block);
+                }
+                let jt = builder.create_jump_table(data);
+                let block = {
+                    let i = state.control_stack.len() - 1 - (default as usize);
+                    let frame = &mut state.control_stack[i];
+                    frame.set_branched_to_exit();
+                    frame.br_destination()
+                };
+                builder.ins().br_table(val, block, jt);
+            } else {
+                // Here we have jump arguments, but Cranelift's `br_table` doesn't support them.
+                // We then proceed to split the edges going out of the `br_table`.
+                let return_count = jump_args_count;
+                let mut dest_block_sequence = vec![];
+                let mut dest_block_map = HashMap::new();
+                for (depth, _) in depths.iter() {
+                    let branch_block = match dest_block_map.entry(*depth as usize) {
+                        hash_map::Entry::Occupied(entry) => *entry.get(),
+                        hash_map::Entry::Vacant(entry) => {
+                            let block = builder.create_block();
+                            dest_block_sequence.push((*depth as usize, block));
+                            *entry.insert(block)
+                        }
+                    };
+                    data.push_entry(branch_block);
+                }
+                let default_branch_block = match dest_block_map.entry(default as usize) {
+                    hash_map::Entry::Occupied(entry) => *entry.get(),
+                    hash_map::Entry::Vacant(entry) => {
+                        let block = builder.create_block();
+                        dest_block_sequence.push((default as usize, block));
+                        *entry.insert(block)
+                    }
+                };
+                let jt = builder.create_jump_table(data);
+                builder.ins().br_table(val, default_branch_block, jt);
+                for (depth, dest_block) in dest_block_sequence {
+                    builder.switch_to_block(dest_block);
+                    builder.seal_block(dest_block);
+                    let real_dest_block = {
+                        let i = state.control_stack.len() - 1 - depth;
+                        let frame = &mut state.control_stack[i];
+                        frame.set_branched_to_exit();
+                        frame.br_destination()
+                    };
+                    let destination_args = state.peekn(return_count);
+                    canonicalise_then_jump(builder, real_dest_block, destination_args);
+                }
+                state.popn(return_count);
+            }
+            state.reachable = false;
+        }
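All of these branch arms share the same depth-to-frame arithmetic, which is easy to check in isolation (illustrative helper, not part of this module):

```rust
// Depth 0 is the innermost frame, i.e. the end of the control stack.
fn frame_index(control_stack_len: usize, relative_depth: u32) -> usize {
    control_stack_len - 1 - relative_depth as usize
}

fn main() {
    // With three nested frames, `br 0` targets the innermost one...
    assert_eq!(frame_index(3, 0), 2);
    // ...and `br 2` targets the outermost (function-body) frame.
    assert_eq!(frame_index(3, 2), 0);
}
```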
+        Operator::Return => {
+            let (return_count, br_destination) = {
+                let frame = &mut state.control_stack[0];
+                if environ.return_mode() == ReturnMode::FallthroughReturn {
+                    frame.set_branched_to_exit();
+                }
+                let return_count = frame.num_return_values();
+                (return_count, frame.br_destination())
+            };
+            {
+                let (return_args, return_args_metadata) = state.peekn_mut(return_count);
+                // TODO(reftypes): maybe ref count here?
+                let return_types = wasm_param_types(&builder.func.signature.returns, |i| {
+                    environ.is_wasm_return(&builder.func.signature, i)
+                });
+                bitcast_arguments(return_args, &return_types, builder);
+                match environ.return_mode() {
+                    ReturnMode::NormalReturns => builder.ins().return_(return_args),
+                    ReturnMode::FallthroughReturn => canonicalise_then_jump(
+                        builder,
+                        br_destination,
+                        (&*return_args, &*return_args_metadata),
+                    ),
+                };
+            }
+            state.popn(return_count);
+            state.reachable = false;
+        }
+        /********************************** Exception handling **********************************/
+        Operator::Try { .. }
+        | Operator::Catch { .. }
+        | Operator::Throw { .. }
+        | Operator::Unwind
+        | Operator::Rethrow { .. }
+        | Operator::Delegate { .. }
+        | Operator::CatchAll => {
+            return Err(wasm_unsupported!(
+                "proposed exception handling operator {:?}",
+                op
+            ));
+        }
+        /************************************ Calls ****************************************
+         * The call instructions pop off their arguments from the stack and append their
+         * return values to it. `call_indirect` needs environment support because there is an
+         * argument referring to an index in the external functions table of the module.
+         ************************************************************************************/
+        Operator::Call { function_index } => {
+            let (fref, num_args) = state.get_direct_func(builder.func, *function_index, environ)?;
+
+            let (args, _args_metadata) = state.peekn_mut(num_args);
+
+            // Bitcast any vector arguments to their default type, I8X16, before calling.
+            let callee_signature =
+                &builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature];
+            let types = wasm_param_types(&callee_signature.params, |i| {
+                environ.is_wasm_parameter(&callee_signature, i)
+            });
+            bitcast_arguments(args, &types, builder);
+            let func_index = FunctionIndex::from_u32(*function_index);
+
+            let call = environ.translate_call(builder.cursor(), func_index, fref, args)?;
+            let inst_results = builder.inst_results(call);
+            debug_assert_eq!(
+                inst_results.len(),
+                builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature]
+                    .returns
+                    .len(),
+                "translate_call results should match the call signature"
+            );
+            let func_type = environ.get_function_type(func_index).unwrap();
+            let mut results_metadata = Vec::with_capacity(func_type.results().len());
+            for result in func_type.results() {
+                results_metadata.push(if *result == WasmerType::ExternRef {
+                    ValueExtraInfo {
+                        ref_counted: true,
+                        ..Default::default()
+                    }
+                } else {
+                    Default::default()
+                });
+            }
+            state.popn(num_args);
+            state.pushn(inst_results, &results_metadata);
+        }
+        Operator::CallIndirect { index, table_index } => {
+            // `index` is the index of the function's signature and `table_index` is the index of
+            // the table to search the function in.
+            let (sigref, num_args) = state.get_indirect_sig(builder.func, *index, environ)?;
+            let table = state.get_or_create_table(builder.func, *table_index, environ)?;
+            let (callee, _) = state.pop1();
+
+            // Bitcast any vector arguments to their default type, I8X16, before calling.
+ let callee_signature = &builder.func.dfg.signatures[sigref]; + let (args, _) = state.peekn_mut(num_args); + let types = wasm_param_types(&callee_signature.params, |i| { + environ.is_wasm_parameter(&callee_signature, i) + }); + bitcast_arguments(args, &types, builder); + + let (args, _args_metadata) = state.peekn(num_args); + let sig_idx = SignatureIndex::from_u32(*index); + + let call = environ.translate_call_indirect( + builder.cursor(), + TableIndex::from_u32(*table_index), + table, + sig_idx, + sigref, + callee, + args, + )?; + let inst_results = builder.inst_results(call); + debug_assert_eq!( + inst_results.len(), + builder.func.dfg.signatures[sigref].returns.len(), + "translate_call_indirect results should match the call signature" + ); + let func_type = environ.get_function_sig(sig_idx).unwrap(); + let mut results_metadata = Vec::with_capacity(func_type.results().len()); + for result in func_type.results() { + results_metadata.push(if *result == WasmerType::ExternRef { + ValueExtraInfo { + ref_counted: true, + ..Default::default() + } + } else { + Default::default() + }); + } + state.popn(num_args); + state.pushn(inst_results, &results_metadata); + } + /******************************* Memory management *********************************** + * Memory management is handled by environment. It is usually translated into calls to + * special functions. + ************************************************************************************/ + Operator::MemoryGrow { mem, mem_byte: _ } => { + // The WebAssembly MVP only supports one linear memory, but we expect the reserved + // argument to be a memory index. + let heap_index = MemoryIndex::from_u32(*mem); + let heap = state.get_heap(builder.func, *mem, environ)?; + let (val, _) = state.pop1(); + state.push1(environ.translate_memory_grow(builder.cursor(), heap_index, heap, val)?) + } + Operator::MemorySize { mem, mem_byte: _ } => { + let heap_index = MemoryIndex::from_u32(*mem); + let heap = state.get_heap(builder.func, *mem, environ)?; + state.push1(environ.translate_memory_size(builder.cursor(), heap_index, heap)?); + } + /******************************* Load instructions *********************************** + * Wasm specifies an integer alignment flag but we drop it in Cranelift. + * The memory base address is provided by the environment. 
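What `translate_load`/`prepare_load` ultimately arrange can be modeled as a bounds-checked effective-address computation. A simplified sketch (the real code also deals with guard pages and dynamic heaps):

```rust
// Simplified model: a load of `access` bytes at wasm address `addr + offset`
// is allowed only if it stays inside the current memory length.
fn effective_addr(base: u64, addr: u32, offset: u32, access: u32, mem_len: u64) -> Option<u64> {
    let ea = addr as u64 + offset as u64; // cannot overflow in u64
    if ea + access as u64 <= mem_len {
        Some(base + ea)
    } else {
        None // the real code traps with HeapAccessOutOfBounds here
    }
}

fn main() {
    assert_eq!(effective_addr(0x1000, 8, 4, 4, 64 * 1024), Some(0x100c));
    assert_eq!(effective_addr(0x1000, u32::MAX, 1, 4, 64 * 1024), None);
}
```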
+ ************************************************************************************/ + Operator::I32Load8U { memarg } => { + translate_load(memarg, ir::Opcode::Uload8, I32, builder, state, environ)?; + } + Operator::I32Load16U { memarg } => { + translate_load(memarg, ir::Opcode::Uload16, I32, builder, state, environ)?; + } + Operator::I32Load8S { memarg } => { + translate_load(memarg, ir::Opcode::Sload8, I32, builder, state, environ)?; + } + Operator::I32Load16S { memarg } => { + translate_load(memarg, ir::Opcode::Sload16, I32, builder, state, environ)?; + } + Operator::I64Load8U { memarg } => { + translate_load(memarg, ir::Opcode::Uload8, I64, builder, state, environ)?; + } + Operator::I64Load16U { memarg } => { + translate_load(memarg, ir::Opcode::Uload16, I64, builder, state, environ)?; + } + Operator::I64Load8S { memarg } => { + translate_load(memarg, ir::Opcode::Sload8, I64, builder, state, environ)?; + } + Operator::I64Load16S { memarg } => { + translate_load(memarg, ir::Opcode::Sload16, I64, builder, state, environ)?; + } + Operator::I64Load32S { memarg } => { + translate_load(memarg, ir::Opcode::Sload32, I64, builder, state, environ)?; + } + Operator::I64Load32U { memarg } => { + translate_load(memarg, ir::Opcode::Uload32, I64, builder, state, environ)?; + } + Operator::I32Load { memarg } => { + translate_load(memarg, ir::Opcode::Load, I32, builder, state, environ)?; + } + Operator::F32Load { memarg } => { + translate_load(memarg, ir::Opcode::Load, F32, builder, state, environ)?; + } + Operator::I64Load { memarg } => { + translate_load(memarg, ir::Opcode::Load, I64, builder, state, environ)?; + } + Operator::F64Load { memarg } => { + translate_load(memarg, ir::Opcode::Load, F64, builder, state, environ)?; + } + Operator::V128Load { memarg } => { + translate_load(memarg, ir::Opcode::Load, I8X16, builder, state, environ)?; + } + Operator::V128Load8x8S { memarg } => { + let (flags, base, offset) = prepare_load(memarg, 8, builder, state, environ)?; + let loaded = builder.ins().sload8x8(flags, base, offset); + state.push1(loaded); + } + Operator::V128Load8x8U { memarg } => { + let (flags, base, offset) = prepare_load(memarg, 8, builder, state, environ)?; + let loaded = builder.ins().uload8x8(flags, base, offset); + state.push1(loaded); + } + Operator::V128Load16x4S { memarg } => { + let (flags, base, offset) = prepare_load(memarg, 8, builder, state, environ)?; + let loaded = builder.ins().sload16x4(flags, base, offset); + state.push1(loaded); + } + Operator::V128Load16x4U { memarg } => { + let (flags, base, offset) = prepare_load(memarg, 8, builder, state, environ)?; + let loaded = builder.ins().uload16x4(flags, base, offset); + state.push1(loaded); + } + Operator::V128Load32x2S { memarg } => { + let (flags, base, offset) = prepare_load(memarg, 8, builder, state, environ)?; + let loaded = builder.ins().sload32x2(flags, base, offset); + state.push1(loaded); + } + Operator::V128Load32x2U { memarg } => { + let (flags, base, offset) = prepare_load(memarg, 8, builder, state, environ)?; + let loaded = builder.ins().uload32x2(flags, base, offset); + state.push1(loaded); + } + /****************************** Store instructions *********************************** + * Wasm specifies an integer alignment flag but we drop it in Cranelift. + * The memory base address is provided by the environment. 
+ ************************************************************************************/ + Operator::I32Store { memarg } + | Operator::I64Store { memarg } + | Operator::F32Store { memarg } + | Operator::F64Store { memarg } => { + translate_store(memarg, ir::Opcode::Store, builder, state, environ)?; + } + Operator::I32Store8 { memarg } | Operator::I64Store8 { memarg } => { + translate_store(memarg, ir::Opcode::Istore8, builder, state, environ)?; + } + Operator::I32Store16 { memarg } | Operator::I64Store16 { memarg } => { + translate_store(memarg, ir::Opcode::Istore16, builder, state, environ)?; + } + Operator::I64Store32 { memarg } => { + translate_store(memarg, ir::Opcode::Istore32, builder, state, environ)?; + } + Operator::V128Store { memarg } => { + translate_store(memarg, ir::Opcode::Store, builder, state, environ)?; + } + /****************************** Nullary Operators ************************************/ + Operator::I32Const { value } => state.push1(builder.ins().iconst(I32, i64::from(*value))), + Operator::I64Const { value } => state.push1(builder.ins().iconst(I64, *value)), + Operator::F32Const { value } => { + state.push1(builder.ins().f32const(f32_translation(*value))); + } + Operator::F64Const { value } => { + state.push1(builder.ins().f64const(f64_translation(*value))); + } + /******************************* Unary Operators *************************************/ + Operator::I32Clz | Operator::I64Clz => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().clz(arg)); + } + Operator::I32Ctz | Operator::I64Ctz => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().ctz(arg)); + } + Operator::I32Popcnt | Operator::I64Popcnt => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().popcnt(arg)); + } + Operator::I64ExtendI32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().sextend(I64, val)); + } + Operator::I64ExtendI32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().uextend(I64, val)); + } + Operator::I32WrapI64 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().ireduce(I32, val)); + } + Operator::F32Sqrt | Operator::F64Sqrt => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().sqrt(arg)); + } + Operator::F32Ceil | Operator::F64Ceil => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().ceil(arg)); + } + Operator::F32Floor | Operator::F64Floor => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().floor(arg)); + } + Operator::F32Trunc | Operator::F64Trunc => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().trunc(arg)); + } + Operator::F32Nearest | Operator::F64Nearest => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().nearest(arg)); + } + Operator::F32Abs | Operator::F64Abs => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fabs(val)); + } + Operator::F32Neg | Operator::F64Neg => { + let (arg, _) = state.pop1(); + state.push1(builder.ins().fneg(arg)); + } + Operator::F64ConvertI64U | Operator::F64ConvertI32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_from_uint(F64, val)); + } + Operator::F64ConvertI64S | Operator::F64ConvertI32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_from_sint(F64, val)); + } + Operator::F32ConvertI64S | Operator::F32ConvertI32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_from_sint(F32, val)); + } + Operator::F32ConvertI64U | Operator::F32ConvertI32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_from_uint(F32, val)); + } + 
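Each of the unary arms that follow shares one shape: pop a value, apply the CLIF instruction, push the result. A toy value stack makes the pattern explicit (standalone, illustrative):

```rust
fn apply_unary(stack: &mut Vec<f64>, op: impl Fn(f64) -> f64) {
    let v = stack.pop().expect("validated Wasm never underflows the stack");
    stack.push(op(v));
}

fn main() {
    let mut stack = vec![2.25f64];
    apply_unary(&mut stack, f64::sqrt);  // mirrors Operator::F64Sqrt
    apply_unary(&mut stack, f64::trunc); // mirrors Operator::F64Trunc
    assert_eq!(stack, vec![1.0]);
}
```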
Operator::F64PromoteF32 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fpromote(F64, val)); + } + Operator::F32DemoteF64 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fdemote(F32, val)); + } + Operator::I64TruncF64S | Operator::I64TruncF32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_sint(I64, val)); + } + Operator::I32TruncF64S | Operator::I32TruncF32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_sint(I32, val)); + } + Operator::I64TruncF64U | Operator::I64TruncF32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_uint(I64, val)); + } + Operator::I32TruncF64U | Operator::I32TruncF32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_uint(I32, val)); + } + Operator::I64TruncSatF64S | Operator::I64TruncSatF32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_sint_sat(I64, val)); + } + Operator::I32TruncSatF64S | Operator::I32TruncSatF32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_sint_sat(I32, val)); + } + Operator::I64TruncSatF64U | Operator::I64TruncSatF32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_uint_sat(I64, val)); + } + Operator::I32TruncSatF64U | Operator::I32TruncSatF32U => { + let (val, _) = state.pop1(); + state.push1(builder.ins().fcvt_to_uint_sat(I32, val)); + } + Operator::F32ReinterpretI32 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().bitcast(F32, val)); + } + Operator::F64ReinterpretI64 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().bitcast(F64, val)); + } + Operator::I32ReinterpretF32 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().bitcast(I32, val)); + } + Operator::I64ReinterpretF64 => { + let (val, _) = state.pop1(); + state.push1(builder.ins().bitcast(I64, val)); + } + Operator::I32Extend8S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().ireduce(I8, val)); + let (val, _) = state.pop1(); + state.push1(builder.ins().sextend(I32, val)); + } + Operator::I32Extend16S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().ireduce(I16, val)); + let (val, _) = state.pop1(); + state.push1(builder.ins().sextend(I32, val)); + } + Operator::I64Extend8S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().ireduce(I8, val)); + let (val, _) = state.pop1(); + state.push1(builder.ins().sextend(I64, val)); + } + Operator::I64Extend16S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().ireduce(I16, val)); + let (val, _) = state.pop1(); + state.push1(builder.ins().sextend(I64, val)); + } + Operator::I64Extend32S => { + let (val, _) = state.pop1(); + state.push1(builder.ins().ireduce(I32, val)); + let (val, _) = state.pop1(); + state.push1(builder.ins().sextend(I64, val)); + } + /****************************** Binary Operators ************************************/ + Operator::I32Add | Operator::I64Add => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().iadd(arg1, arg2)); + } + Operator::I32And | Operator::I64And => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().band(arg1, arg2)); + } + Operator::I32Or | Operator::I64Or => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().bor(arg1, arg2)); + } + Operator::I32Xor | Operator::I64Xor => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().bxor(arg1, arg2)); + } + Operator::I32Shl | Operator::I64Shl => { + let ((arg1, _), (arg2, _)) = 
state.pop2(); + state.push1(builder.ins().ishl(arg1, arg2)); + } + Operator::I32ShrS | Operator::I64ShrS => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().sshr(arg1, arg2)); + } + Operator::I32ShrU | Operator::I64ShrU => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().ushr(arg1, arg2)); + } + Operator::I32Rotl | Operator::I64Rotl => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().rotl(arg1, arg2)); + } + Operator::I32Rotr | Operator::I64Rotr => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().rotr(arg1, arg2)); + } + Operator::F32Add | Operator::F64Add => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fadd(arg1, arg2)); + } + Operator::I32Sub | Operator::I64Sub => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().isub(arg1, arg2)); + } + Operator::F32Sub | Operator::F64Sub => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fsub(arg1, arg2)); + } + Operator::I32Mul | Operator::I64Mul => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().imul(arg1, arg2)); + } + Operator::F32Mul | Operator::F64Mul => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fmul(arg1, arg2)); + } + Operator::F32Div | Operator::F64Div => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fdiv(arg1, arg2)); + } + Operator::I32DivS | Operator::I64DivS => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().sdiv(arg1, arg2)); + } + Operator::I32DivU | Operator::I64DivU => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().udiv(arg1, arg2)); + } + Operator::I32RemS | Operator::I64RemS => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().srem(arg1, arg2)); + } + Operator::I32RemU | Operator::I64RemU => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().urem(arg1, arg2)); + } + Operator::F32Min | Operator::F64Min => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fmin(arg1, arg2)); + } + Operator::F32Max | Operator::F64Max => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fmax(arg1, arg2)); + } + Operator::F32Copysign | Operator::F64Copysign => { + let ((arg1, _), (arg2, _)) = state.pop2(); + state.push1(builder.ins().fcopysign(arg1, arg2)); + } + /**************************** Comparison Operators **********************************/ + Operator::I32LtS | Operator::I64LtS => { + translate_icmp(IntCC::SignedLessThan, builder, state) + } + Operator::I32LtU | Operator::I64LtU => { + translate_icmp(IntCC::UnsignedLessThan, builder, state) + } + Operator::I32LeS | Operator::I64LeS => { + translate_icmp(IntCC::SignedLessThanOrEqual, builder, state) + } + Operator::I32LeU | Operator::I64LeU => { + translate_icmp(IntCC::UnsignedLessThanOrEqual, builder, state) + } + Operator::I32GtS | Operator::I64GtS => { + translate_icmp(IntCC::SignedGreaterThan, builder, state) + } + Operator::I32GtU | Operator::I64GtU => { + translate_icmp(IntCC::UnsignedGreaterThan, builder, state) + } + Operator::I32GeS | Operator::I64GeS => { + translate_icmp(IntCC::SignedGreaterThanOrEqual, builder, state) + } + Operator::I32GeU | Operator::I64GeU => { + translate_icmp(IntCC::UnsignedGreaterThanOrEqual, builder, state) + } + Operator::I32Eqz | Operator::I64Eqz => { + let (arg, _) = state.pop1(); + let val = builder.ins().icmp_imm(IntCC::Equal, arg, 0); + 
state.push1(builder.ins().bint(I32, val)); + } + Operator::I32Eq | Operator::I64Eq => translate_icmp(IntCC::Equal, builder, state), + Operator::F32Eq | Operator::F64Eq => translate_fcmp(FloatCC::Equal, builder, state), + Operator::I32Ne | Operator::I64Ne => translate_icmp(IntCC::NotEqual, builder, state), + Operator::F32Ne | Operator::F64Ne => translate_fcmp(FloatCC::NotEqual, builder, state), + Operator::F32Gt | Operator::F64Gt => translate_fcmp(FloatCC::GreaterThan, builder, state), + Operator::F32Ge | Operator::F64Ge => { + translate_fcmp(FloatCC::GreaterThanOrEqual, builder, state) + } + Operator::F32Lt | Operator::F64Lt => translate_fcmp(FloatCC::LessThan, builder, state), + Operator::F32Le | Operator::F64Le => { + translate_fcmp(FloatCC::LessThanOrEqual, builder, state) + } + Operator::RefNull { ty } => state.push1(environ.translate_ref_null(builder.cursor(), *ty)?), + Operator::RefIsNull => { + let (value, _) = state.pop1(); + state.push1(environ.translate_ref_is_null(builder.cursor(), value)?); + } + Operator::RefFunc { function_index } => { + let index = FunctionIndex::from_u32(*function_index); + state.push1(environ.translate_ref_func(builder.cursor(), index)?); + } + Operator::MemoryAtomicWait32 { memarg } | Operator::MemoryAtomicWait64 { memarg } => { + // The WebAssembly MVP only supports one linear memory and + // wasmparser will ensure that the memory indices specified are + // zero. + let implied_ty = match op { + Operator::MemoryAtomicWait64 { .. } => I64, + Operator::MemoryAtomicWait32 { .. } => I32, + _ => unreachable!(), + }; + let heap_index = MemoryIndex::from_u32(memarg.memory); + let heap = state.get_heap(builder.func, memarg.memory, environ)?; + let (timeout, _) = state.pop1(); // 64 (fixed) + let (expected, _) = state.pop1(); // 32 or 64 (per the `Ixx` in `IxxAtomicWait`) + let (addr, _) = state.pop1(); // 32 (fixed) + let addr = fold_atomic_mem_addr(addr, memarg, implied_ty, builder); + assert!(builder.func.dfg.value_type(expected) == implied_ty); + // `fn translate_atomic_wait` can inspect the type of `expected` to figure out what + // code it needs to generate, if it wants. + let res = environ.translate_atomic_wait( + builder.cursor(), + heap_index, + heap, + addr, + expected, + timeout, + )?; + state.push1(res); + } + Operator::MemoryAtomicNotify { memarg } => { + let heap_index = MemoryIndex::from_u32(memarg.memory); + let heap = state.get_heap(builder.func, memarg.memory, environ)?; + let (count, _) = state.pop1(); // 32 (fixed) + let (addr, _) = state.pop1(); // 32 (fixed) + let addr = fold_atomic_mem_addr(addr, memarg, I32, builder); + let res = + environ.translate_atomic_notify(builder.cursor(), heap_index, heap, addr, count)?; + state.push1(res); + } + Operator::I32AtomicLoad { memarg } => { + translate_atomic_load(I32, I32, memarg, builder, state, environ)? + } + Operator::I64AtomicLoad { memarg } => { + translate_atomic_load(I64, I64, memarg, builder, state, environ)? + } + Operator::I32AtomicLoad8U { memarg } => { + translate_atomic_load(I32, I8, memarg, builder, state, environ)? + } + Operator::I32AtomicLoad16U { memarg } => { + translate_atomic_load(I32, I16, memarg, builder, state, environ)? + } + Operator::I64AtomicLoad8U { memarg } => { + translate_atomic_load(I64, I8, memarg, builder, state, environ)? + } + Operator::I64AtomicLoad16U { memarg } => { + translate_atomic_load(I64, I16, memarg, builder, state, environ)? + } + Operator::I64AtomicLoad32U { memarg } => { + translate_atomic_load(I64, I32, memarg, builder, state, environ)? 
+ } + + Operator::I32AtomicStore { memarg } => { + translate_atomic_store(I32, memarg, builder, state, environ)? + } + Operator::I64AtomicStore { memarg } => { + translate_atomic_store(I64, memarg, builder, state, environ)? + } + Operator::I32AtomicStore8 { memarg } => { + translate_atomic_store(I8, memarg, builder, state, environ)? + } + Operator::I32AtomicStore16 { memarg } => { + translate_atomic_store(I16, memarg, builder, state, environ)? + } + Operator::I64AtomicStore8 { memarg } => { + translate_atomic_store(I8, memarg, builder, state, environ)? + } + Operator::I64AtomicStore16 { memarg } => { + translate_atomic_store(I16, memarg, builder, state, environ)? + } + Operator::I64AtomicStore32 { memarg } => { + translate_atomic_store(I32, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwAdd { memarg } => { + translate_atomic_rmw(I32, I32, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwAdd { memarg } => { + translate_atomic_rmw(I64, I64, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8AddU { memarg } => { + translate_atomic_rmw(I32, I8, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16AddU { memarg } => { + translate_atomic_rmw(I32, I16, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw8AddU { memarg } => { + translate_atomic_rmw(I64, I8, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16AddU { memarg } => { + translate_atomic_rmw(I64, I16, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw32AddU { memarg } => { + translate_atomic_rmw(I64, I32, AtomicRmwOp::Add, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwSub { memarg } => { + translate_atomic_rmw(I32, I32, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwSub { memarg } => { + translate_atomic_rmw(I64, I64, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8SubU { memarg } => { + translate_atomic_rmw(I32, I8, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16SubU { memarg } => { + translate_atomic_rmw(I32, I16, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw8SubU { memarg } => { + translate_atomic_rmw(I64, I8, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16SubU { memarg } => { + translate_atomic_rmw(I64, I16, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw32SubU { memarg } => { + translate_atomic_rmw(I64, I32, AtomicRmwOp::Sub, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwAnd { memarg } => { + translate_atomic_rmw(I32, I32, AtomicRmwOp::And, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwAnd { memarg } => { + translate_atomic_rmw(I64, I64, AtomicRmwOp::And, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8AndU { memarg } => { + translate_atomic_rmw(I32, I8, AtomicRmwOp::And, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16AndU { memarg } => { + translate_atomic_rmw(I32, I16, AtomicRmwOp::And, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw8AndU { memarg } => { + translate_atomic_rmw(I64, I8, AtomicRmwOp::And, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16AndU { memarg } => { + translate_atomic_rmw(I64, I16, AtomicRmwOp::And, memarg, builder, state, environ)? 
+ } + Operator::I64AtomicRmw32AndU { memarg } => { + translate_atomic_rmw(I64, I32, AtomicRmwOp::And, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwOr { memarg } => { + translate_atomic_rmw(I32, I32, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwOr { memarg } => { + translate_atomic_rmw(I64, I64, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8OrU { memarg } => { + translate_atomic_rmw(I32, I8, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16OrU { memarg } => { + translate_atomic_rmw(I32, I16, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw8OrU { memarg } => { + translate_atomic_rmw(I64, I8, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16OrU { memarg } => { + translate_atomic_rmw(I64, I16, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw32OrU { memarg } => { + translate_atomic_rmw(I64, I32, AtomicRmwOp::Or, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwXor { memarg } => { + translate_atomic_rmw(I32, I32, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwXor { memarg } => { + translate_atomic_rmw(I64, I64, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8XorU { memarg } => { + translate_atomic_rmw(I32, I8, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16XorU { memarg } => { + translate_atomic_rmw(I32, I16, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw8XorU { memarg } => { + translate_atomic_rmw(I64, I8, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16XorU { memarg } => { + translate_atomic_rmw(I64, I16, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw32XorU { memarg } => { + translate_atomic_rmw(I64, I32, AtomicRmwOp::Xor, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwXchg { memarg } => { + translate_atomic_rmw(I32, I32, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwXchg { memarg } => { + translate_atomic_rmw(I64, I64, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8XchgU { memarg } => { + translate_atomic_rmw(I32, I8, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16XchgU { memarg } => { + translate_atomic_rmw(I32, I16, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw8XchgU { memarg } => { + translate_atomic_rmw(I64, I8, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16XchgU { memarg } => { + translate_atomic_rmw(I64, I16, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw32XchgU { memarg } => { + translate_atomic_rmw(I64, I32, AtomicRmwOp::Xchg, memarg, builder, state, environ)? + } + + Operator::I32AtomicRmwCmpxchg { memarg } => { + translate_atomic_cas(I32, I32, memarg, builder, state, environ)? + } + Operator::I64AtomicRmwCmpxchg { memarg } => { + translate_atomic_cas(I64, I64, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw8CmpxchgU { memarg } => { + translate_atomic_cas(I32, I8, memarg, builder, state, environ)? + } + Operator::I32AtomicRmw16CmpxchgU { memarg } => { + translate_atomic_cas(I32, I16, memarg, builder, state, environ)? 
+ } + Operator::I64AtomicRmw8CmpxchgU { memarg } => { + translate_atomic_cas(I64, I8, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw16CmpxchgU { memarg } => { + translate_atomic_cas(I64, I16, memarg, builder, state, environ)? + } + Operator::I64AtomicRmw32CmpxchgU { memarg } => { + translate_atomic_cas(I64, I32, memarg, builder, state, environ)? + } + + Operator::AtomicFence { .. } => { + builder.ins().fence(); + } + Operator::MemoryCopy { src, dst } => { + let src_index = MemoryIndex::from_u32(*src); + let dst_index = MemoryIndex::from_u32(*dst); + let src_heap = state.get_heap(builder.func, *src, environ)?; + let dst_heap = state.get_heap(builder.func, *dst, environ)?; + let (len, _) = state.pop1(); + let (src_pos, _) = state.pop1(); + let (dst_pos, _) = state.pop1(); + environ.translate_memory_copy( + builder.cursor(), + src_index, + src_heap, + dst_index, + dst_heap, + dst_pos, + src_pos, + len, + )?; + } + Operator::MemoryFill { mem } => { + let heap_index = MemoryIndex::from_u32(*mem); + let heap = state.get_heap(builder.func, *mem, environ)?; + let (len, _) = state.pop1(); + let (val, _) = state.pop1(); + let (dest, _) = state.pop1(); + environ.translate_memory_fill(builder.cursor(), heap_index, heap, dest, val, len)?; + } + Operator::MemoryInit { segment, mem } => { + let heap_index = MemoryIndex::from_u32(*mem); + let heap = state.get_heap(builder.func, *mem, environ)?; + let (len, _) = state.pop1(); + let (src, _) = state.pop1(); + let (dest, _) = state.pop1(); + environ.translate_memory_init( + builder.cursor(), + heap_index, + heap, + *segment, + dest, + src, + len, + )?; + } + Operator::DataDrop { segment } => { + environ.translate_data_drop(builder.cursor(), *segment)?; + } + Operator::TableSize { table: index } => { + let table = state.get_or_create_table(builder.func, *index, environ)?; + state.push1(environ.translate_table_size( + builder.cursor(), + TableIndex::from_u32(*index), + table, + )?); + } + Operator::TableGrow { table: index } => { + let table_index = TableIndex::from_u32(*index); + let table = state.get_or_create_table(builder.func, *index, environ)?; + let (delta, _) = state.pop1(); + let (init_value, _) = state.pop1(); + state.push1(environ.translate_table_grow( + builder.cursor(), + table_index, + table, + delta, + init_value, + )?); + } + Operator::TableGet { table: index } => { + let table_index = TableIndex::from_u32(*index); + let table = state.get_or_create_table(builder.func, *index, environ)?; + let (index, _) = state.pop1(); + state.push1(environ.translate_table_get(builder, table_index, table, index)?); + } + Operator::TableSet { table: index } => { + let table_index = TableIndex::from_u32(*index); + let table = state.get_or_create_table(builder.func, *index, environ)?; + // We don't touch the ref count here because we're passing it to the host + // then dropping it from the stack. Thus 1 + -1 = 0. 
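+ // (Pop order is the reverse of wasm's operand order: `value` was pushed last, so it is popped first, then `index`.)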
+ let (value, _) = state.pop1(); + let (index, _) = state.pop1(); + environ.translate_table_set(builder, table_index, table, value, index)?; + } + Operator::TableCopy { + dst_table: dst_table_index, + src_table: src_table_index, + } => { + let dst_table = state.get_or_create_table(builder.func, *dst_table_index, environ)?; + let src_table = state.get_or_create_table(builder.func, *src_table_index, environ)?; + let (len, _) = state.pop1(); + let (src, _) = state.pop1(); + let (dest, _) = state.pop1(); + environ.translate_table_copy( + builder.cursor(), + TableIndex::from_u32(*dst_table_index), + dst_table, + TableIndex::from_u32(*src_table_index), + src_table, + dest, + src, + len, + )?; + } + Operator::TableFill { table } => { + let table_index = TableIndex::from_u32(*table); + let (len, _) = state.pop1(); + let (val, _) = state.pop1(); + let (dest, _) = state.pop1(); + environ.translate_table_fill(builder.cursor(), table_index, dest, val, len)?; + } + Operator::TableInit { + segment, + table: table_index, + } => { + let table = state.get_or_create_table(builder.func, *table_index, environ)?; + let (len, _) = state.pop1(); + let (src, _) = state.pop1(); + let (dest, _) = state.pop1(); + environ.translate_table_init( + builder.cursor(), + *segment, + TableIndex::from_u32(*table_index), + table, + dest, + src, + len, + )?; + } + Operator::ElemDrop { segment } => { + environ.translate_elem_drop(builder.cursor(), *segment)?; + } + Operator::V128Const { value } => { + let data = value.bytes().to_vec().into(); + let handle = builder.func.dfg.constants.insert(data); + let value = builder.ins().vconst(I8X16, handle); + // the v128.const is typed in CLIF as an I8X16 but may be raw_bitcast to a different type + // before use + state.push1(value) + } + Operator::I8x16Splat | Operator::I16x8Splat => { + let reduced = builder + .ins() + .ireduce(type_of(op).lane_type(), state.pop1().0); + let splatted = builder.ins().splat(type_of(op), reduced); + state.push1(splatted) + } + Operator::I32x4Splat + | Operator::I64x2Splat + | Operator::F32x4Splat + | Operator::F64x2Splat => { + let splatted = builder.ins().splat(type_of(op), state.pop1().0); + state.push1(splatted) + } + Operator::V128Load8Splat { memarg } + | Operator::V128Load16Splat { memarg } + | Operator::V128Load32Splat { memarg } + | Operator::V128Load64Splat { memarg } => { + translate_load( + memarg, + ir::Opcode::Load, + type_of(op).lane_type(), + builder, + state, + environ, + )?; + let splatted = builder.ins().splat(type_of(op), state.pop1().0); + state.push1(splatted) + } + Operator::V128Load32Zero { memarg } | Operator::V128Load64Zero { memarg } => { + translate_load( + memarg, + ir::Opcode::Load, + type_of(op).lane_type(), + builder, + state, + environ, + )?; + let as_vector = builder.ins().scalar_to_vector(type_of(op), state.pop1().0); + state.push1(as_vector) + } + Operator::V128Load8Lane { memarg, lane } + | Operator::V128Load16Lane { memarg, lane } + | Operator::V128Load32Lane { memarg, lane } + | Operator::V128Load64Lane { memarg, lane } => { + let vector = pop1_with_bitcast(state, type_of(op), builder); + translate_load( + memarg, + ir::Opcode::Load, + type_of(op).lane_type(), + builder, + state, + environ, + )?; + let replacement = state.pop1().0; + state.push1(builder.ins().insertlane(vector, replacement, *lane)) + } + Operator::V128Store8Lane { memarg, lane } + | Operator::V128Store16Lane { memarg, lane } + | Operator::V128Store32Lane { memarg, lane } + | Operator::V128Store64Lane { memarg, lane } => { + let vector =
pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().extractlane(vector, lane.clone())); + translate_store(memarg, ir::Opcode::Store, builder, state, environ)?; + } + Operator::I8x16ExtractLaneS { lane } | Operator::I16x8ExtractLaneS { lane } => { + let vector = pop1_with_bitcast(state, type_of(op), builder); + let extracted = builder.ins().extractlane(vector, lane.clone()); + state.push1(builder.ins().sextend(I32, extracted)) + } + Operator::I8x16ExtractLaneU { lane } | Operator::I16x8ExtractLaneU { lane } => { + let vector = pop1_with_bitcast(state, type_of(op), builder); + let extracted = builder.ins().extractlane(vector, lane.clone()); + state.push1(builder.ins().uextend(I32, extracted)); + // On x86, PEXTRB zeroes the upper bits of the destination register of extractlane so + // uextend could be elided; for now, uextend is needed for Cranelift's type checks to + // work. + } + Operator::I32x4ExtractLane { lane } + | Operator::I64x2ExtractLane { lane } + | Operator::F32x4ExtractLane { lane } + | Operator::F64x2ExtractLane { lane } => { + let vector = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().extractlane(vector, lane.clone())) + } + Operator::I8x16ReplaceLane { lane } | Operator::I16x8ReplaceLane { lane } => { + let ((vector, _), (replacement, _)) = state.pop2(); + let ty = type_of(op); + let reduced = builder.ins().ireduce(ty.lane_type(), replacement); + let vector = optionally_bitcast_vector(vector, ty, builder); + state.push1(builder.ins().insertlane(vector, reduced, *lane)) + } + Operator::I32x4ReplaceLane { lane } + | Operator::I64x2ReplaceLane { lane } + | Operator::F32x4ReplaceLane { lane } + | Operator::F64x2ReplaceLane { lane } => { + let ((vector, _), (replacement, _)) = state.pop2(); + let vector = optionally_bitcast_vector(vector, type_of(op), builder); + state.push1(builder.ins().insertlane(vector, replacement, *lane)) + } + Operator::I8x16Shuffle { lanes, .. } => { + let (a, b) = pop2_with_bitcast(state, I8X16, builder); + let lanes = ConstantData::from(lanes.as_ref()); + let mask = builder.func.dfg.immediates.push(lanes); + let shuffled = builder.ins().shuffle(a, b, mask); + state.push1(shuffled) + // At this point the original types of a and b are lost; users of this value (i.e. this + // WASM-to-CLIF translator) may need to raw_bitcast for type-correctness. This is due + // to WASM using the less specific v128 type for certain operations and more specific + // types (e.g. i8x16) for others. 
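+ // For example, a later consumer expecting an I32X4 would `raw_bitcast` this I8X16 shuffle result back to I32X4 (see `optionally_bitcast_vector` below).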
+ } + Operator::I8x16Swizzle => { + let (a, b) = pop2_with_bitcast(state, I8X16, builder); + state.push1(builder.ins().swizzle(I8X16, a, b)) + } + Operator::I8x16Add | Operator::I16x8Add | Operator::I32x4Add | Operator::I64x2Add => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().iadd(a, b)) + } + Operator::I8x16AddSatS | Operator::I16x8AddSatS => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().sadd_sat(a, b)) + } + Operator::I8x16AddSatU | Operator::I16x8AddSatU => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().uadd_sat(a, b)) + } + Operator::I8x16Sub | Operator::I16x8Sub | Operator::I32x4Sub | Operator::I64x2Sub => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().isub(a, b)) + } + Operator::I8x16SubSatS | Operator::I16x8SubSatS => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().ssub_sat(a, b)) + } + Operator::I8x16SubSatU | Operator::I16x8SubSatU => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().usub_sat(a, b)) + } + Operator::I8x16MinS | Operator::I16x8MinS | Operator::I32x4MinS => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().imin(a, b)) + } + Operator::I8x16MinU | Operator::I16x8MinU | Operator::I32x4MinU => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().umin(a, b)) + } + Operator::I8x16MaxS | Operator::I16x8MaxS | Operator::I32x4MaxS => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().imax(a, b)) + } + Operator::I8x16MaxU | Operator::I16x8MaxU | Operator::I32x4MaxU => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().umax(a, b)) + } + Operator::I8x16RoundingAverageU | Operator::I16x8RoundingAverageU => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().avg_round(a, b)) + } + Operator::I8x16Neg | Operator::I16x8Neg | Operator::I32x4Neg | Operator::I64x2Neg => { + let a = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().ineg(a)) + } + Operator::I8x16Abs | Operator::I16x8Abs | Operator::I32x4Abs | Operator::I64x2Abs => { + let a = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().iabs(a)) + } + Operator::I16x8Mul | Operator::I32x4Mul | Operator::I64x2Mul => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().imul(a, b)) + } + Operator::V128Or => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().bor(a, b)) + } + Operator::V128Xor => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().bxor(a, b)) + } + Operator::V128And => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().band(a, b)) + } + Operator::V128AndNot => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().band_not(a, b)) + } + Operator::V128Not => { + let (a, _) = state.pop1(); + state.push1(builder.ins().bnot(a)); + } + Operator::I8x16Shl | Operator::I16x8Shl | Operator::I32x4Shl | Operator::I64x2Shl => { + let ((a, _), (b, _)) = state.pop2(); + let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder); + let bitwidth = i64::from(type_of(op).lane_bits()); + // The spec expects to 
shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width + // we do `b AND 15`; this means fewer instructions than `iconst + urem`. + let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1); + state.push1(builder.ins().ishl(bitcast_a, b_mod_bitwidth)) + } + Operator::I8x16ShrU | Operator::I16x8ShrU | Operator::I32x4ShrU | Operator::I64x2ShrU => { + let ((a, _), (b, _)) = state.pop2(); + let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder); + let bitwidth = i64::from(type_of(op).lane_bits()); + // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width + // we do `b AND 15`; this means fewer instructions than `iconst + urem`. + let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1); + state.push1(builder.ins().ushr(bitcast_a, b_mod_bitwidth)) + } + Operator::I8x16ShrS | Operator::I16x8ShrS | Operator::I32x4ShrS | Operator::I64x2ShrS => { + let ((a, _), (b, _)) = state.pop2(); + let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder); + let bitwidth = i64::from(type_of(op).lane_bits()); + // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width + // we do `b AND 15`; this means fewer instructions than `iconst + urem`. + let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1); + state.push1(builder.ins().sshr(bitcast_a, b_mod_bitwidth)) + } + Operator::V128Bitselect => { + let ((a, _), (b, _), (c, _)) = state.pop3(); + let bitcast_a = optionally_bitcast_vector(a, I8X16, builder); + let bitcast_b = optionally_bitcast_vector(b, I8X16, builder); + let bitcast_c = optionally_bitcast_vector(c, I8X16, builder); + // The CLIF operand ordering is slightly different and the types of all three + // operands must match (hence the bitcast). + state.push1(builder.ins().bitselect(bitcast_c, bitcast_a, bitcast_b)) + } + Operator::V128AnyTrue => { + let a = pop1_with_bitcast(state, type_of(op), builder); + let bool_result = builder.ins().vany_true(a); + state.push1(builder.ins().bint(I32, bool_result)) + } + Operator::I8x16AllTrue + | Operator::I16x8AllTrue + | Operator::I32x4AllTrue + | Operator::I64x2AllTrue => { + let a = pop1_with_bitcast(state, type_of(op), builder); + let bool_result = builder.ins().vall_true(a); + state.push1(builder.ins().bint(I32, bool_result)) + } + Operator::I8x16Bitmask + | Operator::I16x8Bitmask + | Operator::I32x4Bitmask + | Operator::I64x2Bitmask => { + let a = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().vhigh_bits(I32, a)); + } + Operator::I8x16Eq | Operator::I16x8Eq | Operator::I32x4Eq | Operator::I64x2Eq => { + translate_vector_icmp(IntCC::Equal, type_of(op), builder, state) + } + Operator::I8x16Ne | Operator::I16x8Ne | Operator::I32x4Ne | Operator::I64x2Ne => { + translate_vector_icmp(IntCC::NotEqual, type_of(op), builder, state) + } + Operator::I8x16GtS | Operator::I16x8GtS | Operator::I32x4GtS | Operator::I64x2GtS => { + translate_vector_icmp(IntCC::SignedGreaterThan, type_of(op), builder, state) + } + Operator::I8x16LtS | Operator::I16x8LtS | Operator::I32x4LtS | Operator::I64x2LtS => { + translate_vector_icmp(IntCC::SignedLessThan, type_of(op), builder, state) + } + Operator::I8x16GtU | Operator::I16x8GtU | Operator::I32x4GtU => { + translate_vector_icmp(IntCC::UnsignedGreaterThan, type_of(op), builder, state) + } + Operator::I8x16LtU | Operator::I16x8LtU | Operator::I32x4LtU => { + translate_vector_icmp(IntCC::UnsignedLessThan, type_of(op), builder, state) + } + Operator::I8x16GeS | Operator::I16x8GeS | Operator::I32x4GeS | 
Operator::I64x2GeS => { + translate_vector_icmp(IntCC::SignedGreaterThanOrEqual, type_of(op), builder, state) + } + Operator::I8x16LeS | Operator::I16x8LeS | Operator::I32x4LeS | Operator::I64x2LeS => { + translate_vector_icmp(IntCC::SignedLessThanOrEqual, type_of(op), builder, state) + } + Operator::I8x16GeU | Operator::I16x8GeU | Operator::I32x4GeU => translate_vector_icmp( + IntCC::UnsignedGreaterThanOrEqual, + type_of(op), + builder, + state, + ), + Operator::I8x16LeU | Operator::I16x8LeU | Operator::I32x4LeU => { + translate_vector_icmp(IntCC::UnsignedLessThanOrEqual, type_of(op), builder, state) + } + Operator::F32x4Eq | Operator::F64x2Eq => { + translate_vector_fcmp(FloatCC::Equal, type_of(op), builder, state) + } + Operator::F32x4Ne | Operator::F64x2Ne => { + translate_vector_fcmp(FloatCC::NotEqual, type_of(op), builder, state) + } + Operator::F32x4Lt | Operator::F64x2Lt => { + translate_vector_fcmp(FloatCC::LessThan, type_of(op), builder, state) + } + Operator::F32x4Gt | Operator::F64x2Gt => { + translate_vector_fcmp(FloatCC::GreaterThan, type_of(op), builder, state) + } + Operator::F32x4Le | Operator::F64x2Le => { + translate_vector_fcmp(FloatCC::LessThanOrEqual, type_of(op), builder, state) + } + Operator::F32x4Ge | Operator::F64x2Ge => { + translate_vector_fcmp(FloatCC::GreaterThanOrEqual, type_of(op), builder, state) + } + Operator::F32x4Add | Operator::F64x2Add => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fadd(a, b)) + } + Operator::F32x4Sub | Operator::F64x2Sub => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fsub(a, b)) + } + Operator::F32x4Mul | Operator::F64x2Mul => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fmul(a, b)) + } + Operator::F32x4Div | Operator::F64x2Div => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fdiv(a, b)) + } + Operator::F32x4Max | Operator::F64x2Max => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fmax(a, b)) + } + Operator::F32x4Min | Operator::F64x2Min => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fmin(a, b)) + } + Operator::F32x4PMax | Operator::F64x2PMax => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fmax_pseudo(a, b)) + } + Operator::F32x4PMin | Operator::F64x2PMin => { + let (a, b) = pop2_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fmin_pseudo(a, b)) + } + Operator::F32x4Sqrt | Operator::F64x2Sqrt => { + let a = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().sqrt(a)) + } + Operator::F32x4Neg | Operator::F64x2Neg => { + let a = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fneg(a)) + } + Operator::F32x4Abs | Operator::F64x2Abs => { + let a = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().fabs(a)) + } + Operator::F32x4ConvertI32x4S => { + let a = pop1_with_bitcast(state, I32X4, builder); + state.push1(builder.ins().fcvt_from_sint(F32X4, a)) + } + Operator::F32x4ConvertI32x4U => { + let a = pop1_with_bitcast(state, I32X4, builder); + state.push1(builder.ins().fcvt_from_uint(F32X4, a)) + } + Operator::F64x2ConvertLowI32x4S => { + let a = pop1_with_bitcast(state, I32X4, builder); + state.push1(builder.ins().fcvt_low_from_sint(F64X2, a)); + } + Operator::I32x4TruncSatF32x4S => { + let a = 
pop1_with_bitcast(state, F32X4, builder); + state.push1(builder.ins().fcvt_to_sint_sat(I32X4, a)) + } + Operator::I32x4TruncSatF32x4U => { + let a = pop1_with_bitcast(state, F32X4, builder); + state.push1(builder.ins().fcvt_to_uint_sat(I32X4, a)) + } + Operator::I8x16NarrowI16x8S => { + let (a, b) = pop2_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().snarrow(a, b)) + } + Operator::I16x8NarrowI32x4S => { + let (a, b) = pop2_with_bitcast(state, I32X4, builder); + state.push1(builder.ins().snarrow(a, b)) + } + Operator::I8x16NarrowI16x8U => { + let (a, b) = pop2_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().unarrow(a, b)) + } + Operator::I16x8NarrowI32x4U => { + let (a, b) = pop2_with_bitcast(state, I32X4, builder); + state.push1(builder.ins().unarrow(a, b)) + } + Operator::I16x8ExtendLowI8x16S => { + let a = pop1_with_bitcast(state, I8X16, builder); + state.push1(builder.ins().swiden_low(a)) + } + Operator::I16x8ExtendHighI8x16S => { + let a = pop1_with_bitcast(state, I8X16, builder); + state.push1(builder.ins().swiden_high(a)) + } + Operator::I16x8ExtendLowI8x16U => { + let a = pop1_with_bitcast(state, I8X16, builder); + state.push1(builder.ins().uwiden_low(a)) + } + Operator::I16x8ExtendHighI8x16U => { + let a = pop1_with_bitcast(state, I8X16, builder); + state.push1(builder.ins().uwiden_high(a)) + } + Operator::I32x4ExtendLowI16x8S => { + let a = pop1_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().swiden_low(a)) + } + Operator::I32x4ExtendHighI16x8S => { + let a = pop1_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().swiden_high(a)) + } + Operator::I32x4ExtendLowI16x8U => { + let a = pop1_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().uwiden_low(a)) + } + Operator::I32x4ExtendHighI16x8U => { + let a = pop1_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().uwiden_high(a)) + } + + Operator::F32x4Ceil | Operator::F64x2Ceil => { + // This is something of a misuse of `type_of`, because that produces the return type + // of `op`. In this case we want the arg type, but we know it's the same as the + // return type. Same for the 3 cases below. 
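+ // e.g. `F32x4Ceil` both consumes and produces an F32X4, so the return type doubles as the argument type.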
+ let arg = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().ceil(arg)); + } + Operator::F32x4Floor | Operator::F64x2Floor => { + let arg = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().floor(arg)); + } + Operator::F32x4Trunc | Operator::F64x2Trunc => { + let arg = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().trunc(arg)); + } + Operator::F32x4Nearest | Operator::F64x2Nearest => { + let arg = pop1_with_bitcast(state, type_of(op), builder); + state.push1(builder.ins().nearest(arg)); + } + Operator::I32x4DotI16x8S => { + let (a, b) = pop2_with_bitcast(state, I16X8, builder); + state.push1(builder.ins().widening_pairwise_dot_product_s(a, b)); + } + Operator::I64x2ExtendLowI32x4S + | Operator::I64x2ExtendHighI32x4S + | Operator::I64x2ExtendLowI32x4U + | Operator::I64x2ExtendHighI32x4U + | Operator::I16x8Q15MulrSatS + | Operator::I16x8ExtMulLowI8x16S + | Operator::I16x8ExtMulHighI8x16S + | Operator::I16x8ExtMulLowI8x16U + | Operator::I16x8ExtMulHighI8x16U + | Operator::I32x4ExtMulLowI16x8S + | Operator::I32x4ExtMulHighI16x8S + | Operator::I32x4ExtMulLowI16x8U + | Operator::I32x4ExtMulHighI16x8U + | Operator::I64x2ExtMulLowI32x4S + | Operator::I64x2ExtMulHighI32x4S + | Operator::I64x2ExtMulLowI32x4U + | Operator::I64x2ExtMulHighI32x4U + | Operator::I16x8ExtAddPairwiseI8x16S + | Operator::I16x8ExtAddPairwiseI8x16U + | Operator::I32x4ExtAddPairwiseI16x8S + | Operator::I32x4ExtAddPairwiseI16x8U + | Operator::F32x4DemoteF64x2Zero + | Operator::F64x2PromoteLowF32x4 + | Operator::F64x2ConvertLowI32x4U + | Operator::I32x4TruncSatF64x2SZero + | Operator::I32x4TruncSatF64x2UZero + | Operator::I8x16Popcnt => { + return Err(wasm_unsupported!("proposed simd operator {:?}", op)); + } + Operator::ReturnCall { .. } | Operator::ReturnCallIndirect { .. } => { + return Err(wasm_unsupported!("proposed tail-call operator {:?}", op)); + } + }; + Ok(()) +} + +// Clippy warns us of some fields we are deliberately ignoring +#[cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))] +/// Deals with a Wasm instruction located in an unreachable portion of the code. Most such +/// instructions are dropped, but special ones like `End` or `Else` signal the potential end of the unreachable +/// portion, so the translation state must be updated accordingly. +fn translate_unreachable_operator<FE: FuncEnvironment + ?Sized>( + module_translation_state: &ModuleTranslationState, + op: &Operator, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + debug_assert!(!state.reachable); + match *op { + Operator::If { ty } => { + // Push a placeholder control stack entry. The if isn't reachable, + // so we don't have any branches anywhere. + state.push_if( + ir::Block::reserved_value(), + ElseData::NoElse { + branch_inst: ir::Inst::reserved_value(), + }, + 0, + 0, + ty, + ); + } + Operator::Loop { ty: _ } | Operator::Block { ty: _ } => { + state.push_block(ir::Block::reserved_value(), 0, 0); + } + Operator::Else => { + let i = state.control_stack.len() - 1; + match state.control_stack[i] { + ControlStackFrame::If { + ref else_data, + head_is_reachable, + ref mut consequent_ends_reachable, + blocktype, + .. + } => { + debug_assert!(consequent_ends_reachable.is_none()); + *consequent_ends_reachable = Some(state.reachable); + + if head_is_reachable { + // We have a branch from the head of the `if` to the `else`.
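+ // That branch makes the `else` arm itself reachable, even though the consequent ended unreachable, so flip the flag back on before translating it.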
+ state.reachable = true; + + let else_block = match *else_data { + ElseData::NoElse { branch_inst } => { + let (params, _results) = + module_translation_state.blocktype_params_results(blocktype)?; + let else_block = block_with_params(builder, params, environ)?; + let frame = state.control_stack.last().unwrap(); + frame.truncate_value_stack_to_else_params(&mut state.stack); + + // We change the target of the branch instruction. + builder.change_jump_destination(branch_inst, else_block); + builder.seal_block(else_block); + else_block + } + ElseData::WithElse { else_block } => { + let frame = state.control_stack.last().unwrap(); + frame.truncate_value_stack_to_else_params(&mut state.stack); + else_block + } + }; + + builder.switch_to_block(else_block); + + // Again, no need to push the parameters for the `else`, + // since we already did when we saw the original `if`. See + // the comment for translating `Operator::Else` in + // `translate_operator` for details. + } + } + _ => unreachable!(), + } + } + Operator::End => { + let stack = &mut state.stack; + let control_stack = &mut state.control_stack; + let frame = control_stack.pop().unwrap(); + + // Pop unused parameters from stack. + frame.truncate_value_stack_to_original_size(stack); + + let reachable_anyway = match frame { + // If it is a loop we also have to seal the body loop block + ControlStackFrame::Loop { header, .. } => { + builder.seal_block(header); + // And loops can't have branches to the end. + false + } + // If we never set `consequent_ends_reachable` then that means + // we are finishing the consequent now, and there was no + // `else`. Whether the following block is reachable depends only + // on if the head was reachable. + ControlStackFrame::If { + head_is_reachable, + consequent_ends_reachable: None, + .. + } => head_is_reachable, + // Since we are only in this function when in unreachable code, + // we know that the alternative just ended unreachable. Whether + // the following block is reachable depends on if the consequent + // ended reachable or not. + ControlStackFrame::If { + head_is_reachable, + consequent_ends_reachable: Some(consequent_ends_reachable), + .. + } => head_is_reachable && consequent_ends_reachable, + // All other control constructs are already handled. + _ => false, + }; + + if frame.exit_is_branched_to() || reachable_anyway { + builder.switch_to_block(frame.following_code()); + builder.seal_block(frame.following_code()); + + // And add the return values of the block but only if the next block is reachable + // (which corresponds to testing if the stack depth is 1) + stack.extend_from_slice(builder.block_params(frame.following_code())); + state.reachable = true; + } + } + _ => { + // We don't translate because this is unreachable code + } + } + + Ok(()) +} + +/// Get the address+offset to use for a heap access. +fn get_heap_addr( + heap: ir::Heap, + addr32: ir::Value, + offset: u32, + width: u32, + addr_ty: Type, + builder: &mut FunctionBuilder, +) -> (ir::Value, i32) { + let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into(); + + // How exactly the bounds check is performed here and what it's performed + // on is a bit tricky. Generally we want to rely on access violations (e.g. + // segfaults) to generate traps since that means we don't have to bounds + // check anything explicitly. + // + // If we don't have a guard page of unmapped memory, though, then we can't + // rely on this trapping behavior through segfaults. 
Instead we need to + // bounds-check the entire memory access here which is everything from + // `addr32 + offset` to `addr32 + offset + width` (not inclusive). In this + // scenario our adjusted offset that we're checking is `offset + width`. + // + // If we have a guard page, however, then we can perform a further + // optimization of the generated code by only checking multiples of the + // offset-guard size to be more CSE-friendly. Knowing that we have at least + // 1 page of a guard page we're then able to disregard the `width` since we + // know it's always less than one page. Our bounds check will be for the + // first byte which will either succeed and be guaranteed to fault if it's + // actually out of bounds, or the bounds check itself will fail. In any case + // we assert that the width is reasonably small for now so this assumption + // can be adjusted in the future if we get larger widths. + // + // Put another way we can say, where `y < offset_guard_size`: + // + // n * offset_guard_size + y = offset + // + // We'll then pass `n * offset_guard_size` as the bounds check value. If + // this traps then our `offset` would have trapped anyway. If this check + // passes we know + // + // addr32 + n * offset_guard_size < bound + // + // which means + // + // addr32 + n * offset_guard_size + y < bound + offset_guard_size + // + // because `y < offset_guard_size`, which then means: + // + // addr32 + offset < bound + offset_guard_size + // + // Since we know that those guard-size bytes are all unmapped we're + // guaranteed that `offset` and the `width` bytes after it are either + // in-bounds or will hit the guard page, meaning we'll get the desired + // semantics we want. + // + // As one final comment on the bits with the guard size here, another goal + // of this is to hit an optimization in `heap_addr` where if the heap size + // minus the offset is >= 4GB then bounds checks are 100% eliminated. This + // means that with huge guard regions (e.g. our 2GB default) most adjusted + // offsets we're checking here are zero. This means that we'll hit the fast + // path and emit zero conditional traps for bounds checks. + let adjusted_offset = if offset_guard_size == 0 { + u64::from(offset) + u64::from(width) + } else { + assert!(width < 1024); + cmp::max(u64::from(offset) / offset_guard_size * offset_guard_size, 1) + }; + debug_assert!(adjusted_offset > 0); // want to bounds check at least 1 byte + let check_size = u32::try_from(adjusted_offset).unwrap_or(u32::MAX); + let base = builder.ins().heap_addr(addr_ty, heap, addr32, check_size); + + // Native load/store instructions take a signed `Offset32` immediate, so adjust the base + // pointer if necessary. + if offset > i32::MAX as u32 { + // Offset doesn't fit in the load/store instruction. + let adj = builder.ins().iadd_imm(base, i64::from(i32::MAX) + 1); + (adj, (offset - (i32::MAX as u32 + 1)) as i32) + } else { + (base, offset as i32) + } +} + +/// Prepare for a load; factors out common functionality between load and load_extend operations.
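+/// (As a worked example of the guard-page arithmetic in `get_heap_addr` above, with illustrative numbers: for `offset_guard_size = 0x8000_0000` (2 GiB) and `offset = 0x1000`, the adjusted offset is `max(0x1000 / 0x8000_0000 * 0x8000_0000, 1) = 1`, so `heap_addr` bounds-checks a single byte and the guard region catches the rest of the access.)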
+fn prepare_load<FE: FuncEnvironment + ?Sized>( + memarg: &MemoryImmediate, + loaded_bytes: u32, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<(MemFlags, Value, Offset32)> { + let (addr32, _) = state.pop1(); + + let heap = state.get_heap(builder.func, memarg.memory, environ)?; + let (base, offset) = get_heap_addr( + heap, + addr32, + memarg.offset, + loaded_bytes, + environ.pointer_type(), + builder, + ); + + // Note that we don't set `is_aligned` here, even if the load instruction's + // alignment immediate says it's aligned, because WebAssembly's immediate + // field is just a hint, while Cranelift's aligned flag needs a guarantee. + // WebAssembly memory accesses are always little-endian. + let mut flags = MemFlags::new(); + flags.set_endianness(ir::Endianness::Little); + + Ok((flags, base, offset.into())) +} + +/// Translate a load instruction. +fn translate_load<FE: FuncEnvironment + ?Sized>( + memarg: &MemoryImmediate, + opcode: ir::Opcode, + result_ty: Type, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + let (flags, base, offset) = prepare_load( + memarg, + mem_op_size(opcode, result_ty), + builder, + state, + environ, + )?; + let (load, dfg) = builder.ins().Load(opcode, result_ty, flags, offset, base); + state.push1(dfg.first_result(load)); + Ok(()) +} + +/// Translate a store instruction. +fn translate_store<FE: FuncEnvironment + ?Sized>( + memarg: &MemoryImmediate, + opcode: ir::Opcode, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + let ((addr32, _), (val, _)) = state.pop2(); + let val_ty = builder.func.dfg.value_type(val); + + let heap = state.get_heap(builder.func, memarg.memory, environ)?; + let (base, offset) = get_heap_addr( + heap, + addr32, + memarg.offset, + mem_op_size(opcode, val_ty), + environ.pointer_type(), + builder, + ); + // See the comments in `prepare_load` about the flags.
+ let mut flags = MemFlags::new(); + flags.set_endianness(ir::Endianness::Little); + builder + .ins() + .Store(opcode, val_ty, flags, offset.into(), val, base); + Ok(()) +} + +fn mem_op_size(opcode: ir::Opcode, ty: Type) -> u32 { + match opcode { + ir::Opcode::Istore8 | ir::Opcode::Sload8 | ir::Opcode::Uload8 => 1, + ir::Opcode::Istore16 | ir::Opcode::Sload16 | ir::Opcode::Uload16 => 2, + ir::Opcode::Istore32 | ir::Opcode::Sload32 | ir::Opcode::Uload32 => 4, + ir::Opcode::Store | ir::Opcode::Load => ty.bytes(), + _ => panic!("unknown size of mem op for {:?}", opcode), + } +} + +fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) { + let ((arg0, _), (arg1, _)) = state.pop2(); + let val = builder.ins().icmp(cc, arg0, arg1); + state.push1(builder.ins().bint(I32, val)); +} + +fn fold_atomic_mem_addr( + linear_mem_addr: Value, + memarg: &MemoryImmediate, + access_ty: Type, + builder: &mut FunctionBuilder, +) -> Value { + let access_ty_bytes = access_ty.bytes(); + let final_lma = if memarg.offset > 0 { + assert!(builder.func.dfg.value_type(linear_mem_addr) == I32); + let linear_mem_addr = builder.ins().uextend(I64, linear_mem_addr); + let a = builder + .ins() + .iadd_imm(linear_mem_addr, i64::from(memarg.offset)); + let cflags = builder.ins().ifcmp_imm(a, 0x1_0000_0000i64); + builder.ins().trapif( + IntCC::UnsignedGreaterThanOrEqual, + cflags, + ir::TrapCode::HeapOutOfBounds, + ); + builder.ins().ireduce(I32, a) + } else { + linear_mem_addr + }; + assert!(access_ty_bytes == 4 || access_ty_bytes == 8); + let final_lma_misalignment = builder + .ins() + .band_imm(final_lma, i64::from(access_ty_bytes - 1)); + let f = builder + .ins() + .ifcmp_imm(final_lma_misalignment, i64::from(0)); + builder + .ins() + .trapif(IntCC::NotEqual, f, ir::TrapCode::HeapMisaligned); + final_lma +} + +// For an atomic memory operation, emit an alignment check for the linear memory address, +// and then compute the final effective address. +fn finalise_atomic_mem_addr<FE: FuncEnvironment + ?Sized>( + linear_mem_addr: Value, + memarg: &MemoryImmediate, + access_ty: Type, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<Value> { + // Check the alignment of `linear_mem_addr`. + let access_ty_bytes = access_ty.bytes(); + let final_lma = builder + .ins() + .iadd_imm(linear_mem_addr, i64::from(memarg.offset)); + if access_ty_bytes != 1 { + assert!(access_ty_bytes == 2 || access_ty_bytes == 4 || access_ty_bytes == 8); + let final_lma_misalignment = builder + .ins() + .band_imm(final_lma, i64::from(access_ty_bytes - 1)); + let f = builder + .ins() + .ifcmp_imm(final_lma_misalignment, i64::from(0)); + builder + .ins() + .trapif(IntCC::NotEqual, f, ir::TrapCode::HeapMisaligned); + } + + // Compute the final effective address.
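+ // At this point `final_lma` is known to be suitably aligned: e.g. for a 4-byte access, `final_lma & 3 == 0`, or the `HeapMisaligned` trap above has already fired.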
+ let heap = state.get_heap(builder.func, memarg.memory, environ)?; + let (base, offset) = get_heap_addr( + heap, + final_lma, + /*offset=*/ 0, + access_ty.bytes(), + environ.pointer_type(), + builder, + ); + + let final_effective_address = builder.ins().iadd_imm(base, i64::from(offset)); + Ok(final_effective_address) +} + +fn translate_atomic_rmw<FE: FuncEnvironment + ?Sized>( + widened_ty: Type, + access_ty: Type, + op: AtomicRmwOp, + memarg: &MemoryImmediate, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + let ((linear_mem_addr, _), (mut arg2, _)) = state.pop2(); + let arg2_ty = builder.func.dfg.value_type(arg2); + + // The operation is performed at type `access_ty`, and the old value is zero-extended + // to type `widened_ty`. + match access_ty { + I8 | I16 | I32 | I64 => {} + _ => { + return Err(wasm_unsupported!( + "atomic_rmw: unsupported access type {:?}", + access_ty + )) + } + }; + let w_ty_ok = match widened_ty { + I32 | I64 => true, + _ => false, + }; + assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes()); + + assert!(arg2_ty.bytes() >= access_ty.bytes()); + if arg2_ty.bytes() > access_ty.bytes() { + arg2 = builder.ins().ireduce(access_ty, arg2); + } + + let final_effective_address = + finalise_atomic_mem_addr(linear_mem_addr, memarg, access_ty, builder, state, environ)?; + + // See the comments in `prepare_load` about the flags. + let mut flags = MemFlags::new(); + flags.set_endianness(ir::Endianness::Little); + let mut res = builder + .ins() + .atomic_rmw(access_ty, flags, op, final_effective_address, arg2); + if access_ty != widened_ty { + res = builder.ins().uextend(widened_ty, res); + } + state.push1(res); + Ok(()) +} + +fn translate_atomic_cas<FE: FuncEnvironment + ?Sized>( + widened_ty: Type, + access_ty: Type, + memarg: &MemoryImmediate, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + let ((linear_mem_addr, _), (mut expected, _), (mut replacement, _)) = state.pop3(); + let expected_ty = builder.func.dfg.value_type(expected); + let replacement_ty = builder.func.dfg.value_type(replacement); + + // The compare-and-swap is performed at type `access_ty`, and the old value is zero-extended + // to type `widened_ty`. + match access_ty { + I8 | I16 | I32 | I64 => {} + _ => { + return Err(wasm_unsupported!( + "atomic_cas: unsupported access type {:?}", + access_ty + )) + } + }; + let w_ty_ok = match widened_ty { + I32 | I64 => true, + _ => false, + }; + assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes()); + + assert!(expected_ty.bytes() >= access_ty.bytes()); + if expected_ty.bytes() > access_ty.bytes() { + expected = builder.ins().ireduce(access_ty, expected); + } + assert!(replacement_ty.bytes() >= access_ty.bytes()); + if replacement_ty.bytes() > access_ty.bytes() { + replacement = builder.ins().ireduce(access_ty, replacement); + } + + let final_effective_address = + finalise_atomic_mem_addr(linear_mem_addr, memarg, access_ty, builder, state, environ)?; + + // See the comments in `prepare_load` about the flags.
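+ // The old value read by the compare-and-swap comes back at `access_ty` and is widened below, matching wasm's zero-extending narrow-cmpxchg semantics.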
+ let mut flags = MemFlags::new(); + flags.set_endianness(ir::Endianness::Little); + let mut res = builder + .ins() + .atomic_cas(flags, final_effective_address, expected, replacement); + if access_ty != widened_ty { + res = builder.ins().uextend(widened_ty, res); + } + state.push1(res); + Ok(()) +} + +fn translate_atomic_load<FE: FuncEnvironment + ?Sized>( + widened_ty: Type, + access_ty: Type, + memarg: &MemoryImmediate, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + let (linear_mem_addr, _) = state.pop1(); + + // The load is performed at type `access_ty`, and the loaded value is zero-extended + // to `widened_ty`. + match access_ty { + I8 | I16 | I32 | I64 => {} + _ => { + return Err(wasm_unsupported!( + "atomic_load: unsupported access type {:?}", + access_ty + )) + } + }; + let w_ty_ok = match widened_ty { + I32 | I64 => true, + _ => false, + }; + assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes()); + + let final_effective_address = + finalise_atomic_mem_addr(linear_mem_addr, memarg, access_ty, builder, state, environ)?; + + // See the comments in `prepare_load` about the flags. + let mut flags = MemFlags::new(); + flags.set_endianness(ir::Endianness::Little); + let mut res = builder + .ins() + .atomic_load(access_ty, flags, final_effective_address); + if access_ty != widened_ty { + res = builder.ins().uextend(widened_ty, res); + } + state.push1(res); + Ok(()) +} + +fn translate_atomic_store<FE: FuncEnvironment + ?Sized>( + access_ty: Type, + memarg: &MemoryImmediate, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + let ((linear_mem_addr, _), (mut data, _)) = state.pop2(); + let data_ty = builder.func.dfg.value_type(data); + + // The operation is performed at type `access_ty`, and the data to be stored may first + // need to be narrowed accordingly. + match access_ty { + I8 | I16 | I32 | I64 => {} + _ => { + return Err(wasm_unsupported!( + "atomic_store: unsupported access type {:?}", + access_ty + )) + } + }; + let d_ty_ok = match data_ty { + I32 | I64 => true, + _ => false, + }; + assert!(d_ty_ok && data_ty.bytes() >= access_ty.bytes()); + + if data_ty.bytes() > access_ty.bytes() { + data = builder.ins().ireduce(access_ty, data); + } + + let final_effective_address = + finalise_atomic_mem_addr(linear_mem_addr, memarg, access_ty, builder, state, environ)?; + + // See the comments in `prepare_load` about the flags.
+ let mut flags = MemFlags::new(); + flags.set_endianness(ir::Endianness::Little); + builder + .ins() + .atomic_store(flags, data, final_effective_address); + Ok(()) +} + +fn translate_vector_icmp( + cc: IntCC, + needed_type: Type, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, +) { + let ((a, _), (b, _)) = state.pop2(); + let bitcast_a = optionally_bitcast_vector(a, needed_type, builder); + let bitcast_b = optionally_bitcast_vector(b, needed_type, builder); + state.push1(builder.ins().icmp(cc, bitcast_a, bitcast_b)) +} + +fn translate_fcmp(cc: FloatCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) { + let ((arg0, _), (arg1, _)) = state.pop2(); + let val = builder.ins().fcmp(cc, arg0, arg1); + state.push1(builder.ins().bint(I32, val)); +} + +fn translate_vector_fcmp( + cc: FloatCC, + needed_type: Type, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, +) { + let ((a, _), (b, _)) = state.pop2(); + let bitcast_a = optionally_bitcast_vector(a, needed_type, builder); + let bitcast_b = optionally_bitcast_vector(b, needed_type, builder); + state.push1(builder.ins().fcmp(cc, bitcast_a, bitcast_b)) +} + +fn translate_br_if( + relative_depth: u32, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, +) { + let (val, _) = state.pop1(); + let (br_destination, inputs) = translate_br_if_args(relative_depth, state); + let inputs = (&*inputs.0, &*inputs.1); + canonicalise_then_brnz(builder, val, br_destination, inputs); + + let next_block = builder.create_block(); + canonicalise_then_jump(builder, next_block, (&[], &[])); + builder.seal_block(next_block); // The only predecessor is the current block. + builder.switch_to_block(next_block); +} + +fn translate_br_if_args( + relative_depth: u32, + state: &mut FuncTranslationState, +) -> (ir::Block, (&mut [ir::Value], &mut [ValueExtraInfo])) { + let i = state.control_stack.len() - 1 - (relative_depth as usize); + let (return_count, br_destination) = { + let frame = &mut state.control_stack[i]; + // The values returned by the branch are still available for the reachable + // code that comes after it + frame.set_branched_to_exit(); + let return_count = if frame.is_loop() { + frame.num_param_values() + } else { + frame.num_return_values() + }; + (return_count, frame.br_destination()) + }; + let inputs = state.peekn_mut(return_count); + (br_destination, inputs) +} + +/// Determine the returned value type of a WebAssembly operator +fn type_of(operator: &Operator) -> Type { + match operator { + Operator::V128Load { .. } + | Operator::V128Store { .. } + | Operator::V128Const { .. } + | Operator::V128Not + | Operator::V128And + | Operator::V128AndNot + | Operator::V128Or + | Operator::V128Xor + | Operator::V128AnyTrue + | Operator::V128Bitselect => I8X16, // default type representing V128 + + Operator::I8x16Shuffle { .. } + | Operator::I8x16Splat + | Operator::V128Load8Splat { .. } + | Operator::V128Load8Lane { .. } + | Operator::V128Store8Lane { .. } + | Operator::I8x16ExtractLaneS { .. } + | Operator::I8x16ExtractLaneU { .. } + | Operator::I8x16ReplaceLane { .. 
} + | Operator::I8x16Eq + | Operator::I8x16Ne + | Operator::I8x16LtS + | Operator::I8x16LtU + | Operator::I8x16GtS + | Operator::I8x16GtU + | Operator::I8x16LeS + | Operator::I8x16LeU + | Operator::I8x16GeS + | Operator::I8x16GeU + | Operator::I8x16Neg + | Operator::I8x16Abs + | Operator::I8x16AllTrue + | Operator::I8x16Shl + | Operator::I8x16ShrS + | Operator::I8x16ShrU + | Operator::I8x16Add + | Operator::I8x16AddSatS + | Operator::I8x16AddSatU + | Operator::I8x16Sub + | Operator::I8x16SubSatS + | Operator::I8x16SubSatU + | Operator::I8x16MinS + | Operator::I8x16MinU + | Operator::I8x16MaxS + | Operator::I8x16MaxU + | Operator::I8x16RoundingAverageU + | Operator::I8x16Bitmask => I8X16, + + Operator::I16x8Splat + | Operator::V128Load16Splat { .. } + | Operator::V128Load16Lane { .. } + | Operator::V128Store16Lane { .. } + | Operator::I16x8ExtractLaneS { .. } + | Operator::I16x8ExtractLaneU { .. } + | Operator::I16x8ReplaceLane { .. } + | Operator::I16x8Eq + | Operator::I16x8Ne + | Operator::I16x8LtS + | Operator::I16x8LtU + | Operator::I16x8GtS + | Operator::I16x8GtU + | Operator::I16x8LeS + | Operator::I16x8LeU + | Operator::I16x8GeS + | Operator::I16x8GeU + | Operator::I16x8Neg + | Operator::I16x8Abs + | Operator::I16x8AllTrue + | Operator::I16x8Shl + | Operator::I16x8ShrS + | Operator::I16x8ShrU + | Operator::I16x8Add + | Operator::I16x8AddSatS + | Operator::I16x8AddSatU + | Operator::I16x8Sub + | Operator::I16x8SubSatS + | Operator::I16x8SubSatU + | Operator::I16x8MinS + | Operator::I16x8MinU + | Operator::I16x8MaxS + | Operator::I16x8MaxU + | Operator::I16x8RoundingAverageU + | Operator::I16x8Mul + | Operator::I16x8Bitmask => I16X8, + + Operator::I32x4Splat + | Operator::V128Load32Splat { .. } + | Operator::V128Load32Lane { .. } + | Operator::V128Store32Lane { .. } + | Operator::I32x4ExtractLane { .. } + | Operator::I32x4ReplaceLane { .. } + | Operator::I32x4Eq + | Operator::I32x4Ne + | Operator::I32x4LtS + | Operator::I32x4LtU + | Operator::I32x4GtS + | Operator::I32x4GtU + | Operator::I32x4LeS + | Operator::I32x4LeU + | Operator::I32x4GeS + | Operator::I32x4GeU + | Operator::I32x4Neg + | Operator::I32x4Abs + | Operator::I32x4AllTrue + | Operator::I32x4Shl + | Operator::I32x4ShrS + | Operator::I32x4ShrU + | Operator::I32x4Add + | Operator::I32x4Sub + | Operator::I32x4Mul + | Operator::I32x4MinS + | Operator::I32x4MinU + | Operator::I32x4MaxS + | Operator::I32x4MaxU + | Operator::F32x4ConvertI32x4S + | Operator::F32x4ConvertI32x4U + | Operator::I32x4Bitmask + | Operator::V128Load32Zero { .. } => I32X4, + + Operator::I64x2Splat + | Operator::V128Load64Splat { .. } + | Operator::V128Load64Lane { .. } + | Operator::V128Store64Lane { .. } + | Operator::I64x2ExtractLane { .. } + | Operator::I64x2ReplaceLane { .. } + | Operator::I64x2Eq + | Operator::I64x2Ne + | Operator::I64x2LtS + | Operator::I64x2GtS + | Operator::I64x2LeS + | Operator::I64x2GeS + | Operator::I64x2Neg + | Operator::I64x2Abs + | Operator::I64x2AllTrue + | Operator::I64x2Shl + | Operator::I64x2ShrS + | Operator::I64x2ShrU + | Operator::I64x2Add + | Operator::I64x2Sub + | Operator::I64x2Mul + | Operator::I64x2Bitmask + | Operator::V128Load64Zero { .. } => I64X2, + + Operator::F32x4Splat + | Operator::F32x4ExtractLane { .. } + | Operator::F32x4ReplaceLane { .. 
} + | Operator::F32x4Eq + | Operator::F32x4Ne + | Operator::F32x4Lt + | Operator::F32x4Gt + | Operator::F32x4Le + | Operator::F32x4Ge + | Operator::F32x4Abs + | Operator::F32x4Neg + | Operator::F32x4Sqrt + | Operator::F32x4Add + | Operator::F32x4Sub + | Operator::F32x4Mul + | Operator::F32x4Div + | Operator::F32x4Min + | Operator::F32x4Max + | Operator::F32x4PMin + | Operator::F32x4PMax + | Operator::I32x4TruncSatF32x4S + | Operator::I32x4TruncSatF32x4U + | Operator::F32x4Ceil + | Operator::F32x4Floor + | Operator::F32x4Trunc + | Operator::F32x4Nearest => F32X4, + + Operator::F64x2Splat + | Operator::F64x2ExtractLane { .. } + | Operator::F64x2ReplaceLane { .. } + | Operator::F64x2Eq + | Operator::F64x2Ne + | Operator::F64x2Lt + | Operator::F64x2Gt + | Operator::F64x2Le + | Operator::F64x2Ge + | Operator::F64x2Abs + | Operator::F64x2Neg + | Operator::F64x2Sqrt + | Operator::F64x2Add + | Operator::F64x2Sub + | Operator::F64x2Mul + | Operator::F64x2Div + | Operator::F64x2Min + | Operator::F64x2Max + | Operator::F64x2PMin + | Operator::F64x2PMax + | Operator::F64x2Ceil + | Operator::F64x2Floor + | Operator::F64x2Trunc + | Operator::F64x2Nearest => F64X2, + + _ => unimplemented!( + "Currently only SIMD instructions are mapped to their return type; the \ + following instruction is not mapped: {:?}", + operator + ), + } +} + +/// Some SIMD operations only operate on I8X16 in CLIF; this will convert them to that type by +/// adding a raw_bitcast if necessary. +fn optionally_bitcast_vector( + value: Value, + needed_type: Type, + builder: &mut FunctionBuilder, +) -> Value { + if builder.func.dfg.value_type(value) != needed_type { + builder.ins().raw_bitcast(needed_type, value) + } else { + value + } +} + +#[inline(always)] +fn is_non_canonical_v128(ty: ir::Type) -> bool { + match ty { + B8X16 | B16X8 | B32X4 | B64X2 | I64X2 | I32X4 | I16X8 | F32X4 | F64X2 => true, + _ => false, + } +} + +/// Cast to I8X16 any vector values in `values` that are of "non-canonical" type (meaning, not +/// I8X16), and return them in a slice. A pre-scan is made to determine whether any casts are +/// actually necessary, and if not, the original slice is returned. Otherwise the cast values +/// are returned in a slice that belongs to the caller-supplied `SmallVec`. +fn canonicalise_v128_values<'a>( + tmp_canonicalised: &'a mut SmallVec<[ir::Value; 16]>, + builder: &mut FunctionBuilder, + values: &'a [ir::Value], +) -> &'a [ir::Value] { + debug_assert!(tmp_canonicalised.is_empty()); + // First figure out if any of the parameters need to be cast. Mostly they don't need to be. + let any_non_canonical = values + .iter() + .any(|v| is_non_canonical_v128(builder.func.dfg.value_type(*v))); + // Hopefully we take this exit most of the time, hence doing no heap allocation. + if !any_non_canonical { + return values; + } + // Otherwise we'll have to cast, and push the resulting `Value`s into `tmp_canonicalised`. + for v in values { + tmp_canonicalised.push(if is_non_canonical_v128(builder.func.dfg.value_type(*v)) { + builder.ins().raw_bitcast(I8X16, *v) + } else { + *v + }); + } + tmp_canonicalised.as_slice() +} + +/// Generate a `jump` instruction, but first cast all 128-bit vector values to I8X16 if they +/// don't have that type. This is done in a somewhat roundabout way so as to ensure that we +/// almost never have to do any heap allocation.
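+/// (Only the `ir::Value` half of `params` is consulted here and in the `brz`/`brnz` variants below; the `ValueExtraInfo` half is accepted, presumably so call sites can pass the tuple from `peekn_mut` through unchanged.)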
+fn canonicalise_then_jump( + builder: &mut FunctionBuilder, + destination: ir::Block, + params: (&[ir::Value], &[ValueExtraInfo]), +) -> ir::Inst { + let mut tmp_canonicalised = SmallVec::<[ir::Value; 16]>::new(); + let canonicalised = canonicalise_v128_values(&mut tmp_canonicalised, builder, params.0); + builder.ins().jump(destination, canonicalised) +} + +/// The same but for a `brz` instruction. +fn canonicalise_then_brz( + builder: &mut FunctionBuilder, + cond: ir::Value, + destination: ir::Block, + params: (&[ir::Value], &[ValueExtraInfo]), +) -> ir::Inst { + let mut tmp_canonicalised = SmallVec::<[ir::Value; 16]>::new(); + let canonicalised = canonicalise_v128_values(&mut tmp_canonicalised, builder, params.0); + builder.ins().brz(cond, destination, canonicalised) +} + +/// The same but for a `brnz` instruction. +fn canonicalise_then_brnz( + builder: &mut FunctionBuilder, + cond: ir::Value, + destination: ir::Block, + params: (&[ir::Value], &[ValueExtraInfo]), +) -> ir::Inst { + let mut tmp_canonicalised = SmallVec::<[ir::Value; 16]>::new(); + let canonicalised = canonicalise_v128_values(&mut tmp_canonicalised, builder, params.0); + builder.ins().brnz(cond, destination, canonicalised) +} + +/// A helper for popping and bitcasting a single value; since SIMD values can lose their type by +/// using v128 (i.e. CLIF's I8x16) we must re-type the values using a bitcast to avoid CLIF +/// typing issues. +fn pop1_with_bitcast( + state: &mut FuncTranslationState, + needed_type: Type, + builder: &mut FunctionBuilder, +) -> Value { + optionally_bitcast_vector(state.pop1().0, needed_type, builder) +} + +/// A helper for popping and bitcasting two values; since SIMD values can lose their type by +/// using v128 (i.e. CLIF's I8x16) we must re-type the values using a bitcast to avoid CLIF +/// typing issues. +fn pop2_with_bitcast( + state: &mut FuncTranslationState, + needed_type: Type, + builder: &mut FunctionBuilder, +) -> (Value, Value) { + let ((a, _), (b, _)) = state.pop2(); + let bitcast_a = optionally_bitcast_vector(a, needed_type, builder); + let bitcast_b = optionally_bitcast_vector(b, needed_type, builder); + (bitcast_a, bitcast_b) +} + +/// A helper for bitcasting a sequence of values (e.g. function arguments). If a value is a +/// vector type that does not match its expected type, this will modify the value in place to point +/// to the result of a `raw_bitcast`. This conversion is necessary to translate Wasm code that +/// uses `V128` as function parameters (or implicitly in block parameters) and still use specific +/// CLIF types (e.g. `I32X4`) in the function body. +pub fn bitcast_arguments( + arguments: &mut [Value], + expected_types: &[Type], + builder: &mut FunctionBuilder, +) { + assert_eq!(arguments.len(), expected_types.len()); + for (i, t) in expected_types.iter().enumerate() { + if t.is_vector() { + assert!( + builder.func.dfg.value_type(arguments[i]).is_vector(), + "unexpected type mismatch: expected {}, argument {} was actually of type {}", + t, + arguments[i], + builder.func.dfg.value_type(arguments[i]) + ); + arguments[i] = optionally_bitcast_vector(arguments[i], *t, builder) + } + } +} + +/// A helper to extract the `Type` of each parameter in `params` for which +/// `is_wasm` returns true, typically paired with +/// `is_wasm_return` or `is_wasm_parameter`.
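+/// For example (illustrative call only): `wasm_param_types(&sig.params, |i| env.is_wasm_parameter(&sig, i))` collects just the wasm-visible parameter types of `sig`.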
+pub fn wasm_param_types(params: &[ir::AbiParam], is_wasm: impl Fn(usize) -> bool) -> Vec<ir::Type> { + let mut ret = Vec::with_capacity(params.len()); + for (i, param) in params.iter().enumerate() { + if is_wasm(i) { + ret.push(param.value_type); + } + } + ret +} diff --git a/lib/compiler-cranelift/src/translator/func_environ.rs b/lib/compiler-cranelift/src/translator/func_environ.rs new file mode 100644 index 0000000000..a5f74a094c --- /dev/null +++ b/lib/compiler-cranelift/src/translator/func_environ.rs @@ -0,0 +1,511 @@ +// This file contains code from external sources. +// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md + +//! All the runtime support necessary for the wasm to cranelift translation is formalized by the +//! `FuncEnvironment` trait. + +use super::func_state::FuncTranslationState; +use super::translation_utils::reference_type; +use core::convert::From; +use cranelift_codegen::cursor::FuncCursor; +use cranelift_codegen::ir::immediates::Offset32; +use cranelift_codegen::ir::{self, InstBuilder}; +use cranelift_codegen::isa::TargetFrontendConfig; +use cranelift_frontend::FunctionBuilder; +use wasmer_compiler::wasmparser::{Operator, Type}; +use wasmer_compiler::WasmResult; +use wasmer_types::{ + FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, SignatureIndex, + TableIndex, Type as WasmerType, +}; + +/// The value of a WebAssembly global variable. +#[derive(Clone, Copy)] +pub enum GlobalVariable { + #[allow(dead_code)] + /// This is a constant global with a value known at compile time. + Const(ir::Value), + + /// This is a variable in memory that should be referenced through a `GlobalValue`. + Memory { + /// The address of the global variable storage. + gv: ir::GlobalValue, + /// An offset to add to the address. + offset: Offset32, + /// The global variable's type. + ty: ir::Type, + }, + + #[allow(dead_code)] + /// This is a global variable that needs to be handled by the environment. + Custom, +} + +#[allow(dead_code)] +/// How to return from functions. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum ReturnMode { + /// Use normal return instructions as needed. + NormalReturns, + /// Use a single fallthrough return at the end of the function. + FallthroughReturn, +} + +/// Environment affecting the translation of a WebAssembly module. +pub trait TargetEnvironment { + /// Get the information needed to produce Cranelift IR for the given target. + fn target_config(&self) -> TargetFrontendConfig; + + /// Get the Cranelift integer type to use for native pointers. + /// + /// This returns `I64` for 64-bit architectures and `I32` for 32-bit architectures. + fn pointer_type(&self) -> ir::Type { + ir::Type::int(u16::from(self.target_config().pointer_bits())).unwrap() + } + + /// Get the size of a native pointer, in bytes. + fn pointer_bytes(&self) -> u8 { + self.target_config().pointer_bytes() + } + + /// Get the Cranelift reference type to use for native references. + /// + /// This returns `R64` for 64-bit architectures and `R32` for 32-bit architectures. + fn reference_type(&self) -> ir::Type { + reference_type(self.target_config()).expect("expected reference type") + } +} + +/// Environment affecting the translation of a single WebAssembly function. +/// +/// A `FuncEnvironment` trait object is required to translate a WebAssembly function to Cranelift +/// IR. The function environment provides information about the WebAssembly module as well as the +/// runtime environment.
+pub trait FuncEnvironment: TargetEnvironment {
+    /// Is the given parameter of the given function a wasm-level parameter, as opposed to a hidden
+    /// parameter added for use by the implementation?
+    fn is_wasm_parameter(&self, signature: &ir::Signature, index: usize) -> bool {
+        signature.params[index].purpose == ir::ArgumentPurpose::Normal
+    }
+
+    /// Is the given return of the given function a wasm-level return value, as
+    /// opposed to a hidden value added for use by the implementation?
+    fn is_wasm_return(&self, signature: &ir::Signature, index: usize) -> bool {
+        signature.returns[index].purpose == ir::ArgumentPurpose::Normal
+    }
+
+    /// Should the code be structured to use a single `fallthrough_return` instruction at the end
+    /// of the function body, rather than `return` instructions as needed? This is used by VMs
+    /// to append custom epilogues.
+    fn return_mode(&self) -> ReturnMode {
+        ReturnMode::NormalReturns
+    }
+
+    /// Set up the necessary preamble definitions in `func` to access the global variable
+    /// identified by `index`.
+    ///
+    /// The index space covers both imported globals and globals defined by the module.
+    ///
+    /// Return the global variable reference that should be used to access the global and the
+    /// WebAssembly type of the global.
+    fn make_global(
+        &mut self,
+        func: &mut ir::Function,
+        index: GlobalIndex,
+    ) -> WasmResult<GlobalVariable>;
+
+    /// Set up the necessary preamble definitions in `func` to access the linear memory identified
+    /// by `index`.
+    ///
+    /// The index space covers both imported and locally declared memories.
+    fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<ir::Heap>;
+
+    /// Set up the necessary preamble definitions in `func` to access the table identified
+    /// by `index`.
+    ///
+    /// The index space covers both imported and locally declared tables.
+    fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> WasmResult<ir::Table>;
+
+    /// Set up a signature definition in the preamble of `func` that can be used for an indirect
+    /// call with signature `index`.
+    ///
+    /// The signature may contain additional arguments needed for an indirect call, but the
+    /// arguments marked as `ArgumentPurpose::Normal` must correspond to the WebAssembly signature
+    /// arguments.
+    ///
+    /// The signature will only be used for indirect calls, even if the module has direct function
+    /// calls with the same WebAssembly type.
+    fn make_indirect_sig(
+        &mut self,
+        func: &mut ir::Function,
+        index: SignatureIndex,
+    ) -> WasmResult<ir::SigRef>;
+
+    /// Set up an external function definition in the preamble of `func` that can be used to
+    /// directly call the function `index`.
+    ///
+    /// The index space covers both imported functions and functions defined in the current module.
+    ///
+    /// The function's signature may contain additional arguments needed for a direct call, but the
+    /// arguments marked as `ArgumentPurpose::Normal` must correspond to the WebAssembly signature
+    /// arguments.
+    ///
+    /// The function's signature will only be used for direct calls, even if the module has
+    /// indirect calls with the same WebAssembly type.
+    fn make_direct_func(
+        &mut self,
+        func: &mut ir::Function,
+        index: FunctionIndex,
+    ) -> WasmResult<ir::FuncRef>;
+
+    /// Translate a `call_indirect` WebAssembly instruction at `pos`.
+    ///
+    /// Insert instructions at `pos` for an indirect call to the function `callee` in the table
+    /// `table_index` with WebAssembly signature `sig_index`. The `callee` value will have type
+    /// `i32`.
+ /// + /// The signature `sig_ref` was previously created by `make_indirect_sig()`. + /// + /// Return the call instruction whose results are the WebAssembly return values. + #[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))] + fn translate_call_indirect( + &mut self, + pos: FuncCursor, + table_index: TableIndex, + table: ir::Table, + sig_index: SignatureIndex, + sig_ref: ir::SigRef, + callee: ir::Value, + call_args: &[ir::Value], + ) -> WasmResult; + + /// Translate a `call` WebAssembly instruction at `pos`. + /// + /// Insert instructions at `pos` for a direct call to the function `callee_index`. + /// + /// The function reference `callee` was previously created by `make_direct_func()`. + /// + /// Return the call instruction whose results are the WebAssembly return values. + fn translate_call( + &mut self, + mut pos: FuncCursor, + _callee_index: FunctionIndex, + callee: ir::FuncRef, + call_args: &[ir::Value], + ) -> WasmResult { + Ok(pos.ins().call(callee, call_args)) + } + + /// Translate a `memory.grow` WebAssembly instruction. + /// + /// The `index` provided identifies the linear memory to grow, and `heap` is the heap reference + /// returned by `make_heap` for the same index. + /// + /// The `val` value is the requested memory size in pages. + /// + /// Returns the old size (in pages) of the memory. + fn translate_memory_grow( + &mut self, + pos: FuncCursor, + index: MemoryIndex, + heap: ir::Heap, + val: ir::Value, + ) -> WasmResult; + + /// Translates a `memory.size` WebAssembly instruction. + /// + /// The `index` provided identifies the linear memory to query, and `heap` is the heap reference + /// returned by `make_heap` for the same index. + /// + /// Returns the size in pages of the memory. + fn translate_memory_size( + &mut self, + pos: FuncCursor, + index: MemoryIndex, + heap: ir::Heap, + ) -> WasmResult; + + /// Translate a `memory.copy` WebAssembly instruction. + /// + /// The `index` provided identifies the linear memory to query, and `heap` is the heap reference + /// returned by `make_heap` for the same index. + fn translate_memory_copy( + &mut self, + pos: FuncCursor, + src_index: MemoryIndex, + src_heap: ir::Heap, + dst_index: MemoryIndex, + dst_heap: ir::Heap, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `memory.fill` WebAssembly instruction. + /// + /// The `index` provided identifies the linear memory to query, and `heap` is the heap reference + /// returned by `make_heap` for the same index. + fn translate_memory_fill( + &mut self, + pos: FuncCursor, + index: MemoryIndex, + heap: ir::Heap, + dst: ir::Value, + val: ir::Value, + len: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `memory.init` WebAssembly instruction. + /// + /// The `index` provided identifies the linear memory to query, and `heap` is the heap reference + /// returned by `make_heap` for the same index. `seg_index` is the index of the segment to copy + /// from. + #[allow(clippy::too_many_arguments)] + fn translate_memory_init( + &mut self, + pos: FuncCursor, + index: MemoryIndex, + heap: ir::Heap, + seg_index: u32, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `data.drop` WebAssembly instruction. + fn translate_data_drop(&mut self, pos: FuncCursor, seg_index: u32) -> WasmResult<()>; + + /// Translate a `table.size` WebAssembly instruction. 
+ fn translate_table_size( + &mut self, + pos: FuncCursor, + index: TableIndex, + table: ir::Table, + ) -> WasmResult; + + /// Translate a `table.grow` WebAssembly instruction. + fn translate_table_grow( + &mut self, + pos: FuncCursor, + table_index: TableIndex, + table: ir::Table, + delta: ir::Value, + init_value: ir::Value, + ) -> WasmResult; + + /// Translate a `table.get` WebAssembly instruction. + fn translate_table_get( + &mut self, + builder: &mut FunctionBuilder, + table_index: TableIndex, + table: ir::Table, + index: ir::Value, + ) -> WasmResult; + + /// Translate a `table.set` WebAssembly instruction. + fn translate_table_set( + &mut self, + builder: &mut FunctionBuilder, + table_index: TableIndex, + table: ir::Table, + value: ir::Value, + index: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `table.copy` WebAssembly instruction. + #[allow(clippy::too_many_arguments)] + fn translate_table_copy( + &mut self, + pos: FuncCursor, + dst_table_index: TableIndex, + dst_table: ir::Table, + src_table_index: TableIndex, + src_table: ir::Table, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `table.fill` WebAssembly instruction. + fn translate_table_fill( + &mut self, + pos: FuncCursor, + table_index: TableIndex, + dst: ir::Value, + val: ir::Value, + len: ir::Value, + ) -> WasmResult<()>; + + /// Translates an externref ref count increment. + fn translate_externref_inc( + &mut self, + pos: cranelift_codegen::cursor::FuncCursor<'_>, + externref: ir::Value, + ) -> WasmResult<()>; + + /// Translates an externref ref count decrement. + fn translate_externref_dec( + &mut self, + pos: cranelift_codegen::cursor::FuncCursor<'_>, + externref: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `table.init` WebAssembly instruction. + #[allow(clippy::too_many_arguments)] + fn translate_table_init( + &mut self, + pos: FuncCursor, + seg_index: u32, + table_index: TableIndex, + table: ir::Table, + dst: ir::Value, + src: ir::Value, + len: ir::Value, + ) -> WasmResult<()>; + + /// Translate a `elem.drop` WebAssembly instruction. + fn translate_elem_drop(&mut self, pos: FuncCursor, seg_index: u32) -> WasmResult<()>; + + /// Translate a `ref.null T` WebAssembly instruction. + /// + /// By default, translates into a null reference type. + /// + /// Override this if you don't use Cranelift reference types for all Wasm + /// reference types (e.g. you use a raw pointer for `funcref`s) or if the + /// null sentinel is not a null reference type pointer for your type. If you + /// override this method, then you should also override + /// `translate_ref_is_null` as well. + fn translate_ref_null(&mut self, pos: FuncCursor, ty: Type) -> WasmResult; + // { + // let _ = ty; + // Ok(pos.ins().null(self.reference_type(ty))) + // } + + /// Translate a `ref.is_null` WebAssembly instruction. + /// + /// By default, assumes that `value` is a Cranelift reference type, and that + /// a null Cranelift reference type is the null value for all Wasm reference + /// types. + /// + /// If you override this method, you probably also want to override + /// `translate_ref_null` as well. + fn translate_ref_is_null( + &mut self, + mut pos: FuncCursor, + value: ir::Value, + ) -> WasmResult { + let is_null = pos.ins().is_null(value); + Ok(pos.ins().bint(ir::types::I64, is_null)) + } + + /// Translate a `ref.func` WebAssembly instruction. 
+ fn translate_ref_func( + &mut self, + pos: FuncCursor, + func_index: FunctionIndex, + ) -> WasmResult; + + /// Translate a `global.get` WebAssembly instruction at `pos` for a global + /// that is custom. + fn translate_custom_global_get( + &mut self, + pos: FuncCursor, + global_index: GlobalIndex, + ) -> WasmResult; + + /// Translate a `global.set` WebAssembly instruction at `pos` for a global + /// that is custom. + fn translate_custom_global_set( + &mut self, + pos: FuncCursor, + global_index: GlobalIndex, + val: ir::Value, + ) -> WasmResult<()>; + + /// Translate an `i32.atomic.wait` or `i64.atomic.wait` WebAssembly instruction. + /// The `index` provided identifies the linear memory containing the value + /// to wait on, and `heap` is the heap reference returned by `make_heap` + /// for the same index. Whether the waited-on value is 32- or 64-bit can be + /// determined by examining the type of `expected`, which must be only I32 or I64. + /// + /// Returns an i32, which is negative if the helper call failed. + fn translate_atomic_wait( + &mut self, + pos: FuncCursor, + index: MemoryIndex, + heap: ir::Heap, + addr: ir::Value, + expected: ir::Value, + timeout: ir::Value, + ) -> WasmResult; + + /// Translate an `atomic.notify` WebAssembly instruction. + /// The `index` provided identifies the linear memory containing the value + /// to wait on, and `heap` is the heap reference returned by `make_heap` + /// for the same index. + /// + /// Returns an i64, which is negative if the helper call failed. + fn translate_atomic_notify( + &mut self, + pos: FuncCursor, + index: MemoryIndex, + heap: ir::Heap, + addr: ir::Value, + count: ir::Value, + ) -> WasmResult; + + /// Emit code at the beginning of every wasm loop. + /// + /// This can be used to insert explicit interrupt or safepoint checking at + /// the beginnings of loops. + fn translate_loop_header(&mut self, _pos: FuncCursor) -> WasmResult<()> { + // By default, don't emit anything. + Ok(()) + } + + /// Optional callback for the `FunctionEnvironment` performing this translation to maintain + /// internal state or prepare custom state for the operator to translate + fn before_translate_operator( + &mut self, + _op: &Operator, + _builder: &mut FunctionBuilder, + _state: &FuncTranslationState, + ) -> WasmResult<()> { + Ok(()) + } + + /// Optional callback for the `FunctionEnvironment` performing this translation to maintain + /// internal state or finalize custom state for the operator that was translated + fn after_translate_operator( + &mut self, + _op: &Operator, + _builder: &mut FunctionBuilder, + _state: &FuncTranslationState, + ) -> WasmResult<()> { + Ok(()) + } + + /// Get the type of the global at the given index. + fn get_global_type(&self, global_index: GlobalIndex) -> Option; + + /// Push a local declaration on to the stack to track the type of locals. + fn push_local_decl_on_stack(&mut self, ty: WasmerType); + + /// Push locals for a the params of a function on to the stack. + fn push_params_on_stack(&mut self, function_index: LocalFunctionIndex); + + /// Get the type of the local at the given index. + fn get_local_type(&self, local_index: u32) -> Option; + + /// Get the types of all the current locals. + fn get_local_types(&self) -> &[WasmerType]; + + /// Get the type of the local at the given index. + fn get_function_type(&self, function_index: FunctionIndex) -> Option<&FunctionType>; + + /// Get the type of a function with the given signature index. 
+ fn get_function_sig(&self, sig_index: SignatureIndex) -> Option<&FunctionType>; + + /// Drops all locals that need to be dropped. Useful for returning from functions. + fn translate_drop_locals(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()>; +} diff --git a/lib/compiler-cranelift/src/translator/func_state.rs b/lib/compiler-cranelift/src/translator/func_state.rs new file mode 100644 index 0000000000..6255b9e327 --- /dev/null +++ b/lib/compiler-cranelift/src/translator/func_state.rs @@ -0,0 +1,596 @@ +// This file contains code from external sources. +// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md + +//! WebAssembly module and function translation state. +//! +//! The `ModuleTranslationState` struct defined in this module is used to keep track of data about +//! the whole WebAssembly module, such as the decoded type signatures. +//! +//! The `FuncTranslationState` struct defined in this module is used to keep track of the WebAssembly +//! value and control stacks during the translation of a single function. + +use super::func_environ::{FuncEnvironment, GlobalVariable}; +use crate::{HashMap, Occupied, Vacant}; +use cranelift_codegen::ir::{self, Block, Inst, Value}; +use std::vec::Vec; +use wasmer_compiler::WasmResult; +use wasmer_types::{FunctionIndex, GlobalIndex, MemoryIndex, SignatureIndex, TableIndex}; + +/// Information about the presence of an associated `else` for an `if`, or the +/// lack thereof. +#[derive(Debug)] +pub enum ElseData { + /// The `if` does not already have an `else` block. + /// + /// This doesn't mean that it will never have an `else`, just that we + /// haven't seen it yet. + NoElse { + /// If we discover that we need an `else` block, this is the jump + /// instruction that needs to be fixed up to point to the new `else` + /// block rather than the destination block after the `if...end`. + branch_inst: Inst, + }, + + /// We have already allocated an `else` block. + /// + /// Usually we don't know whether we will hit an `if .. end` or an `if + /// .. else .. end`, but sometimes we can tell based on the block's type + /// signature that the signature is not valid if there isn't an `else`. In + /// these cases, we pre-allocate the `else` block. + WithElse { + /// This is the `else` block. + else_block: Block, + }, +} + +/// A control stack frame can be an `if`, a `block` or a `loop`, each one having the following +/// fields: +/// +/// - `destination`: reference to the `Block` that will hold the code after the control block; +/// - `num_return_values`: number of values returned by the control block; +/// - `original_stack_size`: size of the value stack at the beginning of the control block. +/// +/// Moreover, the `if` frame has the `branch_inst` field that points to the `brz` instruction +/// separating the `true` and `false` branch. The `loop` frame has a `header` field that references +/// the `Block` that contains the beginning of the body of the loop. +#[derive(Debug)] +pub enum ControlStackFrame { + If { + destination: Block, + else_data: ElseData, + num_param_values: usize, + num_return_values: usize, + original_stack_size: usize, + exit_is_branched_to: bool, + blocktype: wasmer_compiler::wasmparser::TypeOrFuncType, + /// Was the head of the `if` reachable? + head_is_reachable: bool, + /// What was the reachability at the end of the consequent? 
+        ///
+        /// This is `None` until we're finished translating the consequent, and
+        /// is set to `Some` either by hitting an `else` when we will begin
+        /// translating the alternative, or by hitting an `end` in which case
+        /// there is no alternative.
+        consequent_ends_reachable: Option<bool>,
+        // Note: no need for `alternative_ends_reachable` because that is just
+        // `state.reachable` when we hit the `end` in the `if .. else .. end`.
+    },
+    Block {
+        destination: Block,
+        num_param_values: usize,
+        num_return_values: usize,
+        original_stack_size: usize,
+        exit_is_branched_to: bool,
+    },
+    Loop {
+        destination: Block,
+        header: Block,
+        num_param_values: usize,
+        num_return_values: usize,
+        original_stack_size: usize,
+    },
+}
+
+/// Helper methods for the control stack objects.
+impl ControlStackFrame {
+    pub fn num_return_values(&self) -> usize {
+        match *self {
+            Self::If {
+                num_return_values, ..
+            }
+            | Self::Block {
+                num_return_values, ..
+            }
+            | Self::Loop {
+                num_return_values, ..
+            } => num_return_values,
+        }
+    }
+    pub fn num_param_values(&self) -> usize {
+        match *self {
+            Self::If {
+                num_param_values, ..
+            }
+            | Self::Block {
+                num_param_values, ..
+            }
+            | Self::Loop {
+                num_param_values, ..
+            } => num_param_values,
+        }
+    }
+    pub fn following_code(&self) -> Block {
+        match *self {
+            Self::If { destination, .. }
+            | Self::Block { destination, .. }
+            | Self::Loop { destination, .. } => destination,
+        }
+    }
+    pub fn br_destination(&self) -> Block {
+        match *self {
+            Self::If { destination, .. } | Self::Block { destination, .. } => destination,
+            Self::Loop { header, .. } => header,
+        }
+    }
+    /// Private helper. Use `truncate_value_stack_to_else_params()` or
+    /// `truncate_value_stack_to_original_size()` to restore value-stack state.
+    fn original_stack_size(&self) -> usize {
+        match *self {
+            Self::If {
+                original_stack_size,
+                ..
+            }
+            | Self::Block {
+                original_stack_size,
+                ..
+            }
+            | Self::Loop {
+                original_stack_size,
+                ..
+            } => original_stack_size,
+        }
+    }
+    pub fn is_loop(&self) -> bool {
+        match *self {
+            Self::If { .. } | Self::Block { .. } => false,
+            Self::Loop { .. } => true,
+        }
+    }
+
+    pub fn exit_is_branched_to(&self) -> bool {
+        match *self {
+            Self::If {
+                exit_is_branched_to,
+                ..
+            }
+            | Self::Block {
+                exit_is_branched_to,
+                ..
+            } => exit_is_branched_to,
+            Self::Loop { .. } => false,
+        }
+    }
+
+    pub fn set_branched_to_exit(&mut self) {
+        match *self {
+            Self::If {
+                ref mut exit_is_branched_to,
+                ..
+            }
+            | Self::Block {
+                ref mut exit_is_branched_to,
+                ..
+            } => *exit_is_branched_to = true,
+            Self::Loop { .. } => {}
+        }
+    }
+
+    /// Pop values from the value stack so that it is left at the
+    /// input-parameters to an else-block.
+    pub fn truncate_value_stack_to_else_params(&self, stack: &mut Vec<Value>) {
+        debug_assert!(matches!(self, &ControlStackFrame::If { .. }));
+        stack.truncate(self.original_stack_size());
+    }
+
+    /// Pop values from the value stack so that it is left at the state it was
+    /// before this control-flow frame.
+    pub fn truncate_value_stack_to_original_size(&self, stack: &mut Vec<Value>) {
+        // The "If" frame pushes its parameters twice, so they're available to the else block
+        // (see also `FuncTranslationState::push_if`).
+        // Yet, the original_stack_size member accounts for them only once, so that the else
+        // block can see the same number of parameters as the consequent block. As a matter of
+        // fact, we need to subtract an extra number of parameter values for `if` blocks.
+        let num_duplicated_params = match self {
+            &ControlStackFrame::If {
+                num_param_values, ..
+            } => {
+                debug_assert!(num_param_values <= self.original_stack_size());
+                num_param_values
+            }
+            _ => 0,
+        };
+        stack.truncate(self.original_stack_size() - num_duplicated_params);
+    }
+}
+
+/// Extra info about values. For example, on the stack.
+#[derive(Debug, Clone, Default)]
+pub struct ValueExtraInfo {
+    /// Whether or not the value should be ref counted.
+    pub ref_counted: bool,
+}
+
+/// Contains information passed along during a function's translation, recording:
+///
+/// - The current value and control stacks.
+/// - The depth of the two unreachable control block stacks, which are manipulated when
+///   translating unreachable code.
+pub struct FuncTranslationState {
+    /// A stack of values corresponding to the active values in the input wasm function at this
+    /// point.
+    pub(crate) stack: Vec<Value>,
+    /// A stack of active control flow operations at this point in the input wasm function.
+    pub(crate) control_stack: Vec<ControlStackFrame>,
+    /// Is the current translation state still reachable? This is false when translating operators
+    /// like End, Return, or Unreachable.
+    pub(crate) reachable: bool,
+
+    // Map of global variables that have already been created by `FuncEnvironment::make_global`.
+    globals: HashMap<GlobalIndex, GlobalVariable>,
+
+    // Map of heaps that have been created by `FuncEnvironment::make_heap`.
+    heaps: HashMap<MemoryIndex, ir::Heap>,
+
+    // Map of tables that have been created by `FuncEnvironment::make_table`.
+    tables: HashMap<TableIndex, ir::Table>,
+
+    // Map of indirect call signatures that have been created by
+    // `FuncEnvironment::make_indirect_sig()`.
+    // Stores both the signature reference and the number of WebAssembly arguments
+    signatures: HashMap<SignatureIndex, (ir::SigRef, usize)>,
+
+    // Imported and local functions that have been created by
+    // `FuncEnvironment::make_direct_func()`.
+    // Stores both the function reference and the number of WebAssembly arguments
+    functions: HashMap<FunctionIndex, (ir::FuncRef, usize)>,
+}
+
+// Public methods that are exposed to non-`cranelift_wasm` API consumers.
+impl FuncTranslationState {
+    /// True if the current translation state expresses reachable code, false if it is unreachable.
+    #[inline]
+    #[allow(dead_code)]
+    pub fn reachable(&self) -> bool {
+        self.reachable
+    }
+}
+
+impl FuncTranslationState {
+    /// Construct a new, empty `FuncTranslationState`.
+    pub(crate) fn new() -> Self {
+        Self {
+            stack: Vec::new(),
+            // TODO(reftypes):
+            //metadata_stack: Vec::new(),
+            control_stack: Vec::new(),
+            reachable: true,
+            globals: HashMap::new(),
+            heaps: HashMap::new(),
+            tables: HashMap::new(),
+            signatures: HashMap::new(),
+            functions: HashMap::new(),
+        }
+    }
+
+    fn clear(&mut self) {
+        debug_assert!(self.stack.is_empty());
+        debug_assert!(self.control_stack.is_empty());
+        self.reachable = true;
+        self.globals.clear();
+        self.heaps.clear();
+        self.tables.clear();
+        self.signatures.clear();
+        self.functions.clear();
+    }
+
+    /// Initialize the state for compiling a function with the given signature.
+    ///
+    /// This resets the state to containing only a single block representing the whole function.
+    /// The exit block is the last block in the function which will contain the return instruction.
+    pub(crate) fn initialize(&mut self, sig: &ir::Signature, exit_block: Block) {
+        self.clear();
+        self.push_block(
+            exit_block,
+            0,
+            sig.returns
+                .iter()
+                .filter(|arg| arg.purpose == ir::ArgumentPurpose::Normal)
+                .count(),
+        );
+    }
+
+    /// Push a value with extra info attached.
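The interplay between `push_if`'s parameter duplication (defined further below) and `truncate_value_stack_to_original_size` above is easy to get wrong. A worked example with hypothetical sizes:

```rust
#[test]
fn if_frame_stack_accounting() {
    // Hypothetical sizes: 3 unrelated values plus 2 `if` parameters on top.
    let len_at_if = 5;
    let num_param_values = 2;
    // `push_if` duplicates the parameters so the `else` block can reuse them...
    let len_after_push_if = len_at_if + num_param_values; // 7
    // ...but records them only once in `original_stack_size`.
    let original_stack_size = len_after_push_if - num_param_values; // 5
    // `truncate_value_stack_to_original_size` therefore subtracts the
    // duplicated parameters a second time to restore the pre-`if` stack.
    assert_eq!(original_stack_size - num_param_values, 3);
}
```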
+ pub(crate) fn push1_extra(&mut self, val: (Value, ValueExtraInfo)) { + self.stack.push(val.0); + // TODO(reftypes): + //self.metadata_stack.push(val.1); + } + + /// Push a value with default extra info. + pub(crate) fn push1(&mut self, val: Value) { + self.stack.push(val); + // TODO(reftypes): + //self.metadata_stack.push(ValueExtraInfo::default()); + } + + /// Push multiple values. + pub(crate) fn pushn(&mut self, vals: &[Value], _vals_metadata: &[ValueExtraInfo]) { + assert_eq!(vals.len(), _vals_metadata.len()); + self.stack.extend_from_slice(vals); + // TODO(reftypes): + //self.metadata_stack.extend_from_slice(vals_metadata); + } + + /// Pop one value. + pub(crate) fn pop1(&mut self) -> (Value, ValueExtraInfo) { + let val = self + .stack + .pop() + .expect("attempted to pop a value from an empty stack"); + let val_metadata = Default::default(); + (val, val_metadata) + } + + /// Peek at the top of the stack without popping it. + pub(crate) fn peek1(&self) -> (Value, ValueExtraInfo) { + let val = *self + .stack + .last() + .expect("attempted to peek at a value on an empty stack"); + let val_metadata = Default::default(); + (val, val_metadata) + } + + /// Pop two values. Return them in the order they were pushed. + pub(crate) fn pop2(&mut self) -> ((Value, ValueExtraInfo), (Value, ValueExtraInfo)) { + let v2 = self.pop1(); + let v1 = self.pop1(); + (v1, v2) + } + + /// Pop three values. Return them in the order they were pushed. + pub(crate) fn pop3( + &mut self, + ) -> ( + (Value, ValueExtraInfo), + (Value, ValueExtraInfo), + (Value, ValueExtraInfo), + ) { + let v3 = self.pop1(); + let v2 = self.pop1(); + let v1 = self.pop1(); + (v1, v2, v3) + } + + /// Helper to ensure the the stack size is at least as big as `n`; note that due to + /// `debug_assert` this will not execute in non-optimized builds. + #[inline] + fn ensure_length_is_at_least(&self, n: usize) { + debug_assert!( + n <= self.stack.len(), + "attempted to access {} values but stack only has {} values", + n, + self.stack.len() + ); + // TODO(reftypes): + /*debug_assert!( + n <= self.metadata_stack.len(), + "attempted to access {} values but stack only has {} values", + n, + self.metadata_stack.len() + );*/ + } + + /// Pop the top `n` values on the stack. + /// + /// The popped values are not returned. Use `peekn` to look at them before popping. + pub(crate) fn popn(&mut self, n: usize) { + self.ensure_length_is_at_least(n); + let new_len = self.stack.len() - n; + self.stack.truncate(new_len); + } + + /// Peek at the top `n` values on the stack in the order they were pushed. + pub(crate) fn peekn(&self, n: usize) -> (&[Value], &[ValueExtraInfo]) { + self.ensure_length_is_at_least(n); + let vals = &self.stack[self.stack.len() - n..]; + // TODO(reftypes): + let vals_metadata = &[]; //&self.metadata_stack[self.metadata_stack.len() - n..]; + (vals, vals_metadata) + } + + /// Peek at the top `n` values on the stack in the order they were pushed. + pub(crate) fn peekn_mut(&mut self, n: usize) -> (&mut [Value], &mut [ValueExtraInfo]) { + self.ensure_length_is_at_least(n); + let len = self.stack.len(); + // TODO(reftypes): + //let metadata_len = self.metadata_stack.len(); + //assert_eq!(len, metadata_len); + let vals = &mut self.stack[len - n..]; + // TODO(reftypes): + let vals_metadata = &mut []; //&mut self.metadata_stack[metadata_len - n..]; + (vals, vals_metadata) + } + + /// Push a block on the control stack. 
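These pop/push helpers are the primitives the operator loop in `code_translator.rs` builds on; a binary operator such as `i32.add` follows this pattern (a fragment, assuming `state` and `builder` in scope):

```rust
// Pop both operands (each carries its ValueExtraInfo), combine, push result.
let (arg1, arg2) = state.pop2();
let result = builder.ins().iadd(arg1.0, arg2.0);
state.push1(result);
```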
+ pub(crate) fn push_block( + &mut self, + following_code: Block, + num_param_types: usize, + num_result_types: usize, + ) { + debug_assert!(num_param_types <= self.stack.len()); + self.control_stack.push(ControlStackFrame::Block { + destination: following_code, + original_stack_size: self.stack.len() - num_param_types, + num_param_values: num_param_types, + num_return_values: num_result_types, + exit_is_branched_to: false, + }); + } + + /// Push a loop on the control stack. + pub(crate) fn push_loop( + &mut self, + header: Block, + following_code: Block, + num_param_types: usize, + num_result_types: usize, + ) { + debug_assert!(num_param_types <= self.stack.len()); + self.control_stack.push(ControlStackFrame::Loop { + header, + destination: following_code, + original_stack_size: self.stack.len() - num_param_types, + num_param_values: num_param_types, + num_return_values: num_result_types, + }); + } + + /// Push an if on the control stack. + pub(crate) fn push_if( + &mut self, + destination: Block, + else_data: ElseData, + num_param_types: usize, + num_result_types: usize, + blocktype: wasmer_compiler::wasmparser::TypeOrFuncType, + ) { + debug_assert!(num_param_types <= self.stack.len()); + + // Push a second copy of our `if`'s parameters on the stack. This lets + // us avoid saving them on the side in the `ControlStackFrame` for our + // `else` block (if it exists), which would require a second heap + // allocation. See also the comment in `translate_operator` for + // `Operator::Else`. + self.stack.reserve(num_param_types); + for i in (self.stack.len() - num_param_types)..self.stack.len() { + let val = self.stack[i]; + self.stack.push(val); + } + + self.control_stack.push(ControlStackFrame::If { + destination, + else_data, + original_stack_size: self.stack.len() - num_param_types, + num_param_values: num_param_types, + num_return_values: num_result_types, + exit_is_branched_to: false, + head_is_reachable: self.reachable, + consequent_ends_reachable: None, + blocktype, + }); + } +} + +/// Methods for handling entity references. +impl FuncTranslationState { + /// Get the `GlobalVariable` reference that should be used to access the global variable + /// `index`. Create the reference if necessary. + /// Also return the WebAssembly type of the global. + pub(crate) fn get_global( + &mut self, + func: &mut ir::Function, + index: u32, + environ: &mut FE, + ) -> WasmResult { + let index = GlobalIndex::from_u32(index); + match self.globals.entry(index) { + Occupied(entry) => Ok(*entry.get()), + Vacant(entry) => Ok(*entry.insert(environ.make_global(func, index)?)), + } + } + + /// Get the `Heap` reference that should be used to access linear memory `index`. + /// Create the reference if necessary. + pub(crate) fn get_heap( + &mut self, + func: &mut ir::Function, + index: u32, + environ: &mut FE, + ) -> WasmResult { + let index = MemoryIndex::from_u32(index); + match self.heaps.entry(index) { + Occupied(entry) => Ok(*entry.get()), + Vacant(entry) => Ok(*entry.insert(environ.make_heap(func, index)?)), + } + } + + /// Get the `Table` reference that should be used to access table `index`. + /// Create the reference if necessary. 
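The entity getters that follow all share one create-on-first-use shape built on the `HashMap` entry API (the crate re-exports `Occupied`/`Vacant`). Distilled, with illustrative names and the `WasmResult` plumbing omitted:

```rust
use std::collections::hash_map::{Entry, HashMap};
use std::hash::Hash;

// The `make` callback (e.g. `FuncEnvironment::make_table`) runs at most once
// per key; later lookups return the cached reference.
fn get_or_create<K: Hash + Eq + Copy, V: Copy>(
    cache: &mut HashMap<K, V>,
    key: K,
    make: impl FnOnce() -> V,
) -> V {
    match cache.entry(key) {
        Entry::Occupied(entry) => *entry.get(),
        Entry::Vacant(entry) => *entry.insert(make()),
    }
}
```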
+ pub(crate) fn get_or_create_table( + &mut self, + func: &mut ir::Function, + index: u32, + environ: &mut FE, + ) -> WasmResult { + let index = TableIndex::from_u32(index); + match self.tables.entry(index) { + Occupied(entry) => Ok(*entry.get()), + Vacant(entry) => Ok(*entry.insert(environ.make_table(func, index)?)), + } + } + + /// Get the `SigRef` reference that should be used to make an indirect call with signature + /// `index`. Also return the number of WebAssembly arguments in the signature. + /// + /// Create the signature if necessary. + pub(crate) fn get_indirect_sig( + &mut self, + func: &mut ir::Function, + index: u32, + environ: &mut FE, + ) -> WasmResult<(ir::SigRef, usize)> { + let index = SignatureIndex::from_u32(index); + match self.signatures.entry(index) { + Occupied(entry) => Ok(*entry.get()), + Vacant(entry) => { + let sig = environ.make_indirect_sig(func, index)?; + Ok(*entry.insert((sig, num_wasm_parameters(environ, &func.dfg.signatures[sig])))) + } + } + } + + /// Get the `FuncRef` reference that should be used to make a direct call to function + /// `index`. Also return the number of WebAssembly arguments in the signature. + /// + /// Create the function reference if necessary. + pub(crate) fn get_direct_func( + &mut self, + func: &mut ir::Function, + index: u32, + environ: &mut FE, + ) -> WasmResult<(ir::FuncRef, usize)> { + let index = FunctionIndex::from_u32(index); + match self.functions.entry(index) { + Occupied(entry) => Ok(*entry.get()), + Vacant(entry) => { + let fref = environ.make_direct_func(func, index)?; + let sig = func.dfg.ext_funcs[fref].signature; + Ok(*entry.insert(( + fref, + num_wasm_parameters(environ, &func.dfg.signatures[sig]), + ))) + } + } + } +} + +fn num_wasm_parameters( + environ: &FE, + signature: &ir::Signature, +) -> usize { + (0..signature.params.len()) + .filter(|index| environ.is_wasm_parameter(signature, *index)) + .count() +} diff --git a/lib/compiler-cranelift/src/translator/func_translator.rs b/lib/compiler-cranelift/src/translator/func_translator.rs new file mode 100644 index 0000000000..48c0ec25d0 --- /dev/null +++ b/lib/compiler-cranelift/src/translator/func_translator.rs @@ -0,0 +1,288 @@ +// This file contains code from external sources. +// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md + +//! Standalone WebAssembly to Cranelift IR translator. +//! +//! This module defines the `FuncTranslator` type which can translate a single WebAssembly +//! function to Cranelift IR guided by a `FuncEnvironment` which provides information about the +//! WebAssembly module and the runtime environment. + +use super::code_translator::{bitcast_arguments, translate_operator, wasm_param_types}; +use super::func_environ::{FuncEnvironment, ReturnMode}; +use super::func_state::FuncTranslationState; +use super::translation_utils::get_vmctx_value_label; +use cranelift_codegen::entity::EntityRef; +use cranelift_codegen::ir::{self, Block, InstBuilder, ValueLabel}; +use cranelift_codegen::timing; +use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable}; +use wasmer_compiler::wasmparser; +use wasmer_compiler::{wasm_unsupported, wptype_to_type, ModuleTranslationState, WasmResult}; +use wasmer_types::LocalFunctionIndex; + +/// WebAssembly to Cranelift IR function translator. +/// +/// A `FuncTranslator` is used to translate a binary WebAssembly function into Cranelift IR guided +/// by a `FuncEnvironment` object. 
A single translator instance can be reused to translate multiple +/// functions which will reduce heap allocation traffic. +pub struct FuncTranslator { + func_ctx: FunctionBuilderContext, + state: FuncTranslationState, +} + +impl FuncTranslator { + /// Create a new translator. + pub fn new() -> Self { + Self { + func_ctx: FunctionBuilderContext::new(), + state: FuncTranslationState::new(), + } + } + + /// Translate a binary WebAssembly function. + /// + /// The `code` slice contains the binary WebAssembly *function code* as it appears in the code + /// section of a WebAssembly module, not including the initial size of the function code. The + /// slice is expected to contain two parts: + /// + /// - The declaration of *locals*, and + /// - The function *body* as an expression. + /// + /// See [the WebAssembly specification][wasm]. + /// + /// [wasm]: https://webassembly.github.io/spec/core/binary/modules.html#code-section + /// + /// The Cranelift IR function `func` should be completely empty except for the `func.signature` + /// and `func.name` fields. The signature may contain special-purpose arguments which are not + /// regarded as WebAssembly local variables. Any signature arguments marked as + /// `ArgumentPurpose::Normal` are made accessible as WebAssembly local variables. + /// + pub fn translate( + &mut self, + module_translation_state: &ModuleTranslationState, + reader: &mut wasmer_compiler::FunctionReader, + func: &mut ir::Function, + environ: &mut FE, + local_function_index: LocalFunctionIndex, + ) -> WasmResult<()> { + environ.push_params_on_stack(local_function_index); + self.translate_from_reader(module_translation_state, reader, func, environ) + } + + /// Translate a binary WebAssembly function from a `FunctionBinaryReader`. + pub fn translate_from_reader( + &mut self, + module_translation_state: &ModuleTranslationState, + reader: &mut wasmer_compiler::FunctionReader, + func: &mut ir::Function, + environ: &mut FE, + ) -> WasmResult<()> { + let _tt = timing::wasm_translate_function(); + let _span = tracing::info_span!( + "translate_from_reader", + bytes = reader.get_binary_reader().bytes_remaining(), + name = %func.name, + signature = %func.signature, + ) + .entered(); + debug_assert_eq!(func.dfg.num_blocks(), 0, "Function must be empty"); + debug_assert_eq!(func.dfg.num_insts(), 0, "Function must be empty"); + + // This clears the `FunctionBuilderContext`. + let mut builder = FunctionBuilder::new(func, &mut self.func_ctx); + builder.set_srcloc(ir::SourceLoc::new( + reader.get_binary_reader().original_position() as u32, + )); + let entry_block = builder.create_block(); + builder.append_block_params_for_function_params(entry_block); + builder.switch_to_block(entry_block); // This also creates values for the arguments. + builder.seal_block(entry_block); // Declare all predecessors known. + + // Make sure the entry block is inserted in the layout before we make any callbacks to + // `environ`. The callback functions may need to insert things in the entry block. + builder.ensure_inserted_block(); + + let num_params = declare_wasm_parameters(&mut builder, entry_block, environ); + + // Set up the translation state with a single pushed control block representing the whole + // function and its return values. 
+ let exit_block = builder.create_block(); + builder.append_block_params_for_function_returns(exit_block); + self.state.initialize(&builder.func.signature, exit_block); + + parse_local_decls(reader, &mut builder, num_params, environ)?; + parse_function_body( + module_translation_state, + reader, + &mut builder, + &mut self.state, + environ, + )?; + + builder.finalize(); + Ok(()) + } +} + +/// Declare local variables for the signature parameters that correspond to WebAssembly locals. +/// +/// Return the number of local variables declared. +fn declare_wasm_parameters( + builder: &mut FunctionBuilder, + entry_block: Block, + environ: &FE, +) -> usize { + let sig_len = builder.func.signature.params.len(); + let mut next_local = 0; + for i in 0..sig_len { + let param_type = builder.func.signature.params[i]; + // There may be additional special-purpose parameters in addition to the normal WebAssembly + // signature parameters. For example, a `vmctx` pointer. + if environ.is_wasm_parameter(&builder.func.signature, i) { + // This is a normal WebAssembly signature parameter, so create a local for it. + let local = Variable::new(next_local); + builder.declare_var(local, param_type.value_type); + next_local += 1; + + let param_value = builder.block_params(entry_block)[i]; + builder.def_var(local, param_value); + } + if param_type.purpose == ir::ArgumentPurpose::VMContext { + let param_value = builder.block_params(entry_block)[i]; + builder.set_val_label(param_value, get_vmctx_value_label()); + } + } + + next_local +} + +/// Parse the local variable declarations that precede the function body. +/// +/// Declare local variables, starting from `num_params`. +fn parse_local_decls( + reader: &wasmer_compiler::FunctionReader, + builder: &mut FunctionBuilder, + num_params: usize, + environ: &mut FE, +) -> WasmResult<()> { + let mut next_local = num_params; + let mut local_reader = reader.get_locals_reader()?; + let local_count = local_reader.get_count(); + for _ in 0..local_count { + builder.set_srcloc(ir::SourceLoc::new(local_reader.original_position() as u32)); + let (count, ty) = local_reader.read()?; + declare_locals(builder, count, ty, &mut next_local, environ)?; + } + + Ok(()) +} + +/// Declare `count` local variables of the same type, starting from `next_local`. +/// +/// Fail if the type is not valid for a local. +fn declare_locals( + builder: &mut FunctionBuilder, + count: u32, + wasm_type: wasmparser::Type, + next_local: &mut usize, + environ: &mut FE, +) -> WasmResult<()> { + // All locals are initialized to 0. 
+ use wasmparser::Type::*; + let zeroval = match wasm_type { + I32 => builder.ins().iconst(ir::types::I32, 0), + I64 => builder.ins().iconst(ir::types::I64, 0), + F32 => builder.ins().f32const(ir::immediates::Ieee32::with_bits(0)), + F64 => builder.ins().f64const(ir::immediates::Ieee64::with_bits(0)), + V128 => { + let constant_handle = builder.func.dfg.constants.insert([0; 16].to_vec().into()); + builder.ins().vconst(ir::types::I8X16, constant_handle) + } + ExternRef => builder.ins().null(environ.reference_type()), + FuncRef => builder.ins().null(environ.reference_type()), + ty => return Err(wasm_unsupported!("unsupported local type {:?}", ty)), + }; + + let wasmer_ty = wptype_to_type(wasm_type).unwrap(); + let ty = builder.func.dfg.value_type(zeroval); + for _ in 0..count { + let local = Variable::new(*next_local); + builder.declare_var(local, ty); + builder.def_var(local, zeroval); + builder.set_val_label(zeroval, ValueLabel::new(*next_local)); + environ.push_local_decl_on_stack(wasmer_ty); + *next_local += 1; + } + Ok(()) +} + +/// Parse the function body in `reader`. +/// +/// This assumes that the local variable declarations have already been parsed and function +/// arguments and locals are declared in the builder. +fn parse_function_body( + module_translation_state: &ModuleTranslationState, + reader: &wasmer_compiler::FunctionReader, + builder: &mut FunctionBuilder, + state: &mut FuncTranslationState, + environ: &mut FE, +) -> WasmResult<()> { + // The control stack is initialized with a single block representing the whole function. + debug_assert_eq!(state.control_stack.len(), 1, "State not initialized"); + let mut reader = reader.get_operators_reader()?.into_iter_with_offsets(); + + // Keep going until the final `End` operator which pops the outermost block. + while !state.control_stack.is_empty() { + let (op, pos) = reader.next().unwrap()?; + builder.set_srcloc(ir::SourceLoc::new(pos as u32)); + environ.before_translate_operator(&op, builder, state)?; + translate_operator(module_translation_state, &op, builder, state, environ)?; + environ.after_translate_operator(&op, builder, state)?; + } + + // When returning we drop all values in locals and on the stack. + + // The final `End` operator left us in the exit block where we need to manually add a return + // instruction. + // + // If the exit block is unreachable, it may not have the correct arguments, so we would + // generate a return instruction that doesn't match the signature. + if state.reachable { + debug_assert!(builder.is_pristine()); + if !builder.is_unreachable() { + environ.translate_drop_locals(builder)?; + + let _num_elems_to_drop = state.stack.len() - builder.func.signature.returns.len(); + // drop elements on the stack that we're not returning + /*for val in state + .stack + .iter() + .zip(state.metadata_stack.iter()) + .take(num_elems_to_drop) + .filter(|(_, metadata)| metadata.ref_counted) + .map(|(val, _)| val) + { + environ.translate_externref_dec(builder.cursor(), *val)?; + }*/ + + // TODO: look into what `state.reachable` check above does as well as `!builder.is_unreachable`, do we need that too for ref counting? 
+ + match environ.return_mode() { + ReturnMode::NormalReturns => { + let return_types = wasm_param_types(&builder.func.signature.returns, |i| { + environ.is_wasm_return(&builder.func.signature, i) + }); + bitcast_arguments(&mut state.stack, &return_types, builder); + builder.ins().return_(&state.stack) + } + ReturnMode::FallthroughReturn => builder.ins().fallthrough_return(&state.stack), + }; + } + } + + // Discard any remaining values on the stack. Either we just returned them, + // or the end of the function is unreachable. + state.stack.clear(); + //state.metadata_stack.clear(); + + Ok(()) +} diff --git a/lib/compiler-cranelift/src/translator/mod.rs b/lib/compiler-cranelift/src/translator/mod.rs new file mode 100644 index 0000000000..d7efa42ccc --- /dev/null +++ b/lib/compiler-cranelift/src/translator/mod.rs @@ -0,0 +1,17 @@ +//! Tools for translating wasm function bytecode to Cranelift IR. + +mod code_translator; +mod func_environ; +mod func_state; +mod func_translator; +mod translation_utils; +mod unwind; + +pub use self::func_environ::{FuncEnvironment, GlobalVariable, ReturnMode, TargetEnvironment}; +pub use self::func_state::FuncTranslationState; +pub use self::func_translator::FuncTranslator; +pub use self::translation_utils::{ + get_vmctx_value_label, irlibcall_to_libcall, irreloc_to_relocationkind, + signature_to_cranelift_ir, transform_jump_table, type_to_irtype, +}; +pub(crate) use self::unwind::{compiled_function_unwind_info, CraneliftUnwindInfo}; diff --git a/lib/compiler-cranelift/src/translator/translation_utils.rs b/lib/compiler-cranelift/src/translator/translation_utils.rs new file mode 100644 index 0000000000..81e940ac18 --- /dev/null +++ b/lib/compiler-cranelift/src/translator/translation_utils.rs @@ -0,0 +1,162 @@ +//! Helper functions and structures for the translation. + +use super::func_environ::TargetEnvironment; +use crate::std::string::ToString; +use core::u32; +use cranelift_codegen::binemit::Reloc; +use cranelift_codegen::ir::{self, AbiParam}; +use cranelift_codegen::isa::TargetFrontendConfig; +use cranelift_entity::{EntityRef as CraneliftEntityRef, SecondaryMap as CraneliftSecondaryMap}; +use cranelift_frontend::FunctionBuilder; +use wasmer_compiler::wasm_unsupported; +use wasmer_compiler::wasmparser; +use wasmer_compiler::{JumpTable, RelocationKind}; +use wasmer_compiler::{WasmError, WasmResult}; +use wasmer_types::entity::{EntityRef, SecondaryMap}; +use wasmer_types::{FunctionType, Type}; +use wasmer_vm::libcalls::LibCall; + +/// Helper function translate a Function signature into Cranelift Ir +pub fn signature_to_cranelift_ir( + signature: &FunctionType, + target_config: TargetFrontendConfig, +) -> ir::Signature { + let mut sig = ir::Signature::new(target_config.default_call_conv); + sig.params.extend(signature.params().iter().map(|&ty| { + let cret_arg: ir::Type = type_to_irtype(ty, target_config) + .expect("only numeric types are supported in function signatures"); + AbiParam::new(cret_arg) + })); + sig.returns.extend(signature.results().iter().map(|&ty| { + let cret_arg: ir::Type = type_to_irtype(ty, target_config) + .expect("only numeric types are supported in function signatures"); + AbiParam::new(cret_arg) + })); + // The Vmctx signature + sig.params.insert( + 0, + AbiParam::special(target_config.pointer_type(), ir::ArgumentPurpose::VMContext), + ); + sig +} + +/// Helper function translating wasmparser types to Cranelift types when possible. 
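The effect of `signature_to_cranelift_ir` is easiest to see on an example: every wasm signature gains a leading `VMContext` pointer parameter. A hypothetical check (fragment; assumes a `target_config: TargetFrontendConfig` in scope, e.g. obtained from the target ISA):

```rust
use cranelift_codegen::ir::ArgumentPurpose;
use wasmer_types::{FunctionType, Type};

// wasm: (i32, i32) -> i64
let wasm_sig = FunctionType::new(vec![Type::I32, Type::I32], vec![Type::I64]);
let clif_sig = signature_to_cranelift_ir(&wasm_sig, target_config);
assert_eq!(clif_sig.params.len(), 3); // vmctx + the two wasm params
assert_eq!(clif_sig.params[0].purpose, ArgumentPurpose::VMContext);
assert_eq!(clif_sig.returns.len(), 1);
```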
+pub fn reference_type(target_config: TargetFrontendConfig) -> WasmResult { + match target_config.pointer_type() { + ir::types::I32 => Ok(ir::types::R32), + ir::types::I64 => Ok(ir::types::R64), + _ => Err(WasmError::Unsupported( + "unsupported pointer type".to_string(), + )), + } +} + +/// Helper function translating wasmparser types to Cranelift types when possible. +pub fn type_to_irtype(ty: Type, target_config: TargetFrontendConfig) -> WasmResult { + match ty { + Type::I32 => Ok(ir::types::I32), + Type::I64 => Ok(ir::types::I64), + Type::F32 => Ok(ir::types::F32), + Type::F64 => Ok(ir::types::F64), + Type::V128 => Ok(ir::types::I8X16), + Type::ExternRef | Type::FuncRef => reference_type(target_config), + // ty => Err(wasm_unsupported!("type_to_type: wasm type {:?}", ty)), + } +} + +/// Transform Cranelift LibCall into runtime LibCall +pub fn irlibcall_to_libcall(libcall: ir::LibCall) -> LibCall { + match libcall { + ir::LibCall::Probestack => LibCall::Probestack, + ir::LibCall::CeilF32 => LibCall::CeilF32, + ir::LibCall::CeilF64 => LibCall::CeilF64, + ir::LibCall::FloorF32 => LibCall::FloorF32, + ir::LibCall::FloorF64 => LibCall::FloorF64, + ir::LibCall::TruncF32 => LibCall::TruncF32, + ir::LibCall::TruncF64 => LibCall::TruncF64, + ir::LibCall::NearestF32 => LibCall::NearestF32, + ir::LibCall::NearestF64 => LibCall::NearestF64, + _ => panic!("Unsupported libcall"), + } +} + +/// Transform Cranelift Reloc to compiler Relocation +pub fn irreloc_to_relocationkind(reloc: Reloc) -> RelocationKind { + match reloc { + Reloc::Abs4 => RelocationKind::Abs4, + Reloc::Abs8 => RelocationKind::Abs8, + Reloc::X86PCRel4 => RelocationKind::X86PCRel4, + Reloc::X86PCRelRodata4 => RelocationKind::X86PCRelRodata4, + Reloc::X86CallPCRel4 => RelocationKind::X86CallPCRel4, + Reloc::X86CallPLTRel4 => RelocationKind::X86CallPLTRel4, + Reloc::X86GOTPCRel4 => RelocationKind::X86GOTPCRel4, + _ => panic!("The relocation {} is not yet supported.", reloc), + } +} + +/// Create a `Block` with the given Wasm parameters. +pub fn block_with_params( + builder: &mut FunctionBuilder, + params: &[wasmparser::Type], + environ: &PE, +) -> WasmResult { + let block = builder.create_block(); + for ty in params.iter() { + match ty { + wasmparser::Type::I32 => { + builder.append_block_param(block, ir::types::I32); + } + wasmparser::Type::I64 => { + builder.append_block_param(block, ir::types::I64); + } + wasmparser::Type::F32 => { + builder.append_block_param(block, ir::types::F32); + } + wasmparser::Type::F64 => { + builder.append_block_param(block, ir::types::F64); + } + wasmparser::Type::ExternRef | wasmparser::Type::FuncRef => { + builder.append_block_param(block, environ.reference_type()); + } + wasmparser::Type::V128 => { + builder.append_block_param(block, ir::types::I8X16); + } + ty => { + return Err(wasm_unsupported!( + "block_with_params: type {:?} in multi-value block's signature", + ty + )) + } + } + } + Ok(block) +} + +/// Turns a `wasmparser` `f32` into a `Cranelift` one. +pub fn f32_translation(x: wasmparser::Ieee32) -> ir::immediates::Ieee32 { + ir::immediates::Ieee32::with_bits(x.bits()) +} + +/// Turns a `wasmparser` `f64` into a `Cranelift` one. +pub fn f64_translation(x: wasmparser::Ieee64) -> ir::immediates::Ieee64 { + ir::immediates::Ieee64::with_bits(x.bits()) +} + +/// Special VMContext value label. It is tracked as 0xffff_fffe label. 
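`f32_translation` and `f64_translation` deliberately copy raw bits rather than converting through Rust floats, so NaN payloads survive translation bit-for-bit. For instance:

```rust
use cranelift_codegen::ir::immediates::Ieee32;

#[test]
fn f32_translation_is_bit_exact() {
    // A quiet NaN with a nonzero payload: a float round-trip could lose it,
    // but `Ieee32::with_bits` preserves the exact pattern.
    let bits = 0x7fc0_0001_u32;
    assert_eq!(Ieee32::with_bits(bits).bits(), bits);
}
```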
+pub fn get_vmctx_value_label() -> ir::ValueLabel {
+    const VMCTX_LABEL: u32 = 0xffff_fffe;
+    ir::ValueLabel::from_u32(VMCTX_LABEL)
+}
+
+/// Transforms Cranelift JumpTable's into runtime JumpTables
+pub fn transform_jump_table(
+    jt_offsets: CraneliftSecondaryMap<ir::JumpTable, u32>,
+) -> SecondaryMap<JumpTable, u32> {
+    let mut func_jt_offsets = SecondaryMap::with_capacity(jt_offsets.capacity());
+
+    for (key, value) in jt_offsets.iter() {
+        let new_key = JumpTable::new(key.index());
+        func_jt_offsets[new_key] = *value;
+    }
+    func_jt_offsets
+}
diff --git a/lib/compiler-cranelift/src/translator/unwind.rs b/lib/compiler-cranelift/src/translator/unwind.rs
new file mode 100644
index 0000000000..e77d762c49
--- /dev/null
+++ b/lib/compiler-cranelift/src/translator/unwind.rs
@@ -0,0 +1,67 @@
+//! A `Compilation` contains the compiled function bodies for a WebAssembly
+//! module.
+
+#[cfg(feature = "unwind")]
+use cranelift_codegen::isa::unwind::{systemv::UnwindInfo as DwarfFDE, UnwindInfo};
+use cranelift_codegen::print_errors::pretty_error;
+use cranelift_codegen::{isa, Context};
+use wasmer_compiler::{CompileError, CompiledFunctionUnwindInfo};
+
+/// Cranelift specific unwind info
+pub(crate) enum CraneliftUnwindInfo {
+    #[cfg(feature = "unwind")]
+    /// Windows Unwind info
+    WindowsX64(Vec<u8>),
+    /// Dwarf FDE
+    #[cfg(feature = "unwind")]
+    FDE(DwarfFDE),
+    /// No Unwind info attached
+    None,
+}
+
+impl CraneliftUnwindInfo {
+    /// Transform the `CraneliftUnwindInfo` to the Windows format.
+    ///
+    /// We skip the DWARF as it is not needed for trampolines (which are the
+    /// main users of this function)
+    pub fn maybe_into_to_windows_unwind(self) -> Option<CompiledFunctionUnwindInfo> {
+        match self {
+            #[cfg(feature = "unwind")]
+            Self::WindowsX64(unwind_info) => {
+                Some(CompiledFunctionUnwindInfo::WindowsX64(unwind_info))
+            }
+            _ => None,
+        }
+    }
+}
+
+#[cfg(feature = "unwind")]
+/// Constructs unwind info object from Cranelift IR
+pub(crate) fn compiled_function_unwind_info(
+    isa: &dyn isa::TargetIsa,
+    context: &Context,
+) -> Result<CraneliftUnwindInfo, CompileError> {
+    let unwind_info = context
+        .create_unwind_info(isa)
+        .map_err(|error| CompileError::Codegen(pretty_error(&context.func, Some(isa), error)))?;
+
+    match unwind_info {
+        Some(UnwindInfo::WindowsX64(unwind)) => {
+            let size = unwind.emit_size();
+            let mut data: Vec<u8> = vec![0; size];
+            unwind.emit(&mut data[..]);
+            Ok(CraneliftUnwindInfo::WindowsX64(data))
+        }
+        Some(UnwindInfo::SystemV(unwind)) => Ok(CraneliftUnwindInfo::FDE(unwind)),
+        Some(_) | None => Ok(CraneliftUnwindInfo::None),
+    }
+}
+
+#[cfg(not(feature = "unwind"))]
+/// Constructs unwind info object from Cranelift IR
+pub(crate) fn compiled_function_unwind_info(
+    isa: &dyn isa::TargetIsa,
+    context: &Context,
+) -> Result<CraneliftUnwindInfo, CompileError> {
+    Ok(CraneliftUnwindInfo::None)
+}
diff --git a/lib/compiler-llvm/Cargo.toml b/lib/compiler-llvm/Cargo.toml
new file mode 100644
index 0000000000..a9ad2ad5c3
--- /dev/null
+++ b/lib/compiler-llvm/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "wasmer-compiler-llvm"
+version = "2.1.0"
+description = "LLVM compiler for Wasmer WebAssembly runtime"
+categories = ["wasm"]
+keywords = ["wasm", "webassembly", "compiler", "llvm"]
+authors = ["Wasmer Engineering Team <engineering@wasmer.io>"]
+repository = "https://github.com/wasmerio/wasmer"
+documentation = "https://docs.rs/wasmer-compiler-llvm/"
+license = "MIT"
+readme = "README.md"
+edition = "2018"
+
+[dependencies]
+wasmer-compiler = { path = "../compiler", version = "=2.4.0", package = "wasmer-compiler-near", features = ["translator"] }
+wasmer-vm = { path = "../vm", version = "=2.4.0", package = "wasmer-vm-near" }
+wasmer-types = { path = "../types", version = "=2.4.0", package = "wasmer-types-near" }
+target-lexicon = { version = "0.12.2", default-features = false }
+smallvec = "1.6"
+object = { version = "0.27", default-features = false, features = ["read"] }
+libc = { version = "^0.2", default-features = false }
+byteorder = "1"
+itertools = "0.10"
+rayon = "1.5"
+
+[dependencies.inkwell]
+package = "inkwell"
+version = "0.1.0-beta.4"
+default-features = false
+features = ["llvm12-0", "target-x86", "target-aarch64"]
+
+[build-dependencies]
+cc = "1.0"
+lazy_static = "1.4"
+regex = "1.3"
+semver = "1.0"
+rustc_version = "0.4"
+
+[features]
+test = []
diff --git a/lib/compiler-llvm/README.md b/lib/compiler-llvm/README.md
new file mode 100644
index 0000000000..51368ebe23
--- /dev/null
+++ b/lib/compiler-llvm/README.md
@@ -0,0 +1,48 @@
+# `wasmer-compiler-llvm` [![Build Status](https://github.com/wasmerio/wasmer/workflows/build/badge.svg?style=flat-square)](https://github.com/wasmerio/wasmer/actions?query=workflow%3Abuild) [![Join Wasmer Slack](https://img.shields.io/static/v1?label=Slack&message=join%20chat&color=brighgreen&style=flat-square)](https://slack.wasmer.io) [![MIT License](https://img.shields.io/github/license/wasmerio/wasmer.svg?style=flat-square)](https://github.com/wasmerio/wasmer/blob/master/LICENSE) [![crates.io](https://img.shields.io/crates/v/wasmer-compiler-llvm.svg)](https://crates.io/crates/wasmer-compiler-llvm)
+
+This crate contains a compiler implementation based on [the LLVM Compiler Infrastructure][LLVM].
+
+## Usage
+
+```rust
+use wasmer::{Store, Universal};
+use wasmer_compiler_llvm::LLVM;
+
+let compiler = LLVM::new();
+// Put it into an engine and add it to the store
+let store = Store::new(&Universal::new(compiler).engine());
+```
+
+*Note: you can find a [full working example using the LLVM compiler here][example].*
+
+## When to use LLVM
+
+We recommend using LLVM as the default compiler when running WebAssembly
+files on any **production** system, as it offers maximum performance,
+close to native speeds.
+
+## Requirements
+
+The LLVM compiler requires a valid installation of LLVM in your system.
+It currently requires **LLVM 12**.
+
+You can install LLVM easily on your Debian-like system via this command:
+
+```bash
+wget https://apt.llvm.org/llvm.sh -O /tmp/llvm.sh
+sudo bash /tmp/llvm.sh 12
+```
+
+Or in macOS:
+
+```bash
+brew install llvm
+```
+
+Or via any of the [pre-built binaries that LLVM offers][llvm-pre-built].
+
+[LLVM]: https://llvm.org/
+[example]: https://github.com/wasmerio/wasmer/blob/master/examples/compiler_llvm.rs
+[llvm-pre-built]: https://releases.llvm.org/download.html
diff --git a/lib/compiler-llvm/src/abi/aarch64_systemv.rs b/lib/compiler-llvm/src/abi/aarch64_systemv.rs
new file mode 100644
index 0000000000..cee33c6060
--- /dev/null
+++ b/lib/compiler-llvm/src/abi/aarch64_systemv.rs
@@ -0,0 +1,629 @@
+use crate::abi::Abi;
+use crate::translator::intrinsics::{type_to_llvm, Intrinsics};
+use inkwell::{
+    attributes::{Attribute, AttributeLoc},
+    builder::Builder,
+    context::Context,
+    types::{AnyType, BasicMetadataTypeEnum, BasicType, FunctionType, StructType},
+    values::{BasicValue, BasicValueEnum, CallSiteValue, FunctionValue, IntValue, PointerValue},
+    AddressSpace,
+};
+use wasmer_compiler::CompileError;
+use wasmer_types::{FunctionType as FuncSig, Type};
+use wasmer_vm::VMOffsets;
+
+use std::convert::TryInto;
+
+/// Implementation of the [`Abi`] trait for the Aarch64 ABI on Linux.
+pub struct Aarch64SystemV {} + +impl Abi for Aarch64SystemV { + // Given a function definition, retrieve the parameter that is the vmctx pointer. + fn get_vmctx_ptr_param<'ctx>(&self, func_value: &FunctionValue<'ctx>) -> PointerValue<'ctx> { + func_value + .get_nth_param( + if func_value + .get_enum_attribute( + AttributeLoc::Param(0), + Attribute::get_named_enum_kind_id("sret"), + ) + .is_some() + { + 1 + } else { + 0 + }, + ) + .unwrap() + .into_pointer_value() + } + + // Given a wasm function type, produce an llvm function declaration. + fn func_type_to_llvm<'ctx>( + &self, + context: &'ctx Context, + intrinsics: &Intrinsics<'ctx>, + offsets: Option<&VMOffsets>, + sig: &FuncSig, + ) -> Result<(FunctionType<'ctx>, Vec<(Attribute, AttributeLoc)>), CompileError> { + let user_param_types = sig.params().iter().map(|&ty| type_to_llvm(intrinsics, ty)); + + let param_types = + std::iter::once(Ok(intrinsics.ctx_ptr_ty.as_basic_type_enum())).chain(user_param_types); + + let vmctx_attributes = |i: u32| { + vec![ + ( + context.create_enum_attribute(Attribute::get_named_enum_kind_id("nofree"), 0), + AttributeLoc::Param(i), + ), + ( + if let Some(offsets) = offsets { + context.create_enum_attribute( + Attribute::get_named_enum_kind_id("dereferenceable"), + offsets.size_of_vmctx().into(), + ) + } else { + context + .create_enum_attribute(Attribute::get_named_enum_kind_id("nonnull"), 0) + }, + AttributeLoc::Param(i), + ), + ( + context.create_enum_attribute( + Attribute::get_named_enum_kind_id("align"), + std::mem::align_of::() + .try_into() + .unwrap(), + ), + AttributeLoc::Param(i), + ), + ] + }; + + Ok(match sig.results() { + [] => ( + intrinsics.void_ty.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [_] => { + let single_value = sig.results()[0]; + ( + type_to_llvm(intrinsics, single_value)?.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + [Type::F32, Type::F32] => { + let f32_ty = intrinsics.f32_ty.as_basic_type_enum(); + ( + context.struct_type(&[f32_ty, f32_ty], false).fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + [Type::F64, Type::F64] => { + let f64_ty = intrinsics.f64_ty.as_basic_type_enum(); + ( + context.struct_type(&[f64_ty, f64_ty], false).fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + [Type::F32, Type::F32, Type::F32] => { + let f32_ty = intrinsics.f32_ty.as_basic_type_enum(); + ( + context + .struct_type(&[f32_ty, f32_ty, f32_ty], false) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + [Type::F32, Type::F32, Type::F32, Type::F32] => { + let f32_ty = intrinsics.f32_ty.as_basic_type_enum(); + ( + context + .struct_type(&[f32_ty, f32_ty, f32_ty, f32_ty], false) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? 
+ .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + _ => { + let sig_returns_bitwidths = sig + .results() + .iter() + .map(|ty| match ty { + Type::I32 | Type::F32 => 32, + Type::I64 | Type::F64 => 64, + Type::V128 => 128, + Type::ExternRef | Type::FuncRef => 64, /* pointer */ + }) + .collect::>(); + match sig_returns_bitwidths.as_slice() { + [32, 32] => ( + intrinsics.i64_ty.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [32, 64] + | [64, 32] + | [64, 64] + | [32, 32, 32] + | [64, 32, 32] + | [32, 32, 64] + | [32, 32, 32, 32] => ( + intrinsics.i64_ty.array_type(2).fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + _ => { + let basic_types: Vec<_> = sig + .results() + .iter() + .map(|&ty| type_to_llvm(intrinsics, ty)) + .collect::>()?; + + let sret = context.struct_type(&basic_types, false); + let sret_ptr = sret.ptr_type(AddressSpace::Generic); + + let param_types = + std::iter::once(Ok(sret_ptr.as_basic_type_enum())).chain(param_types); + + let mut attributes = vec![( + context.create_type_attribute( + Attribute::get_named_enum_kind_id("sret"), + sret.as_any_type_enum(), + ), + AttributeLoc::Param(0), + )]; + attributes.append(&mut vmctx_attributes(1)); + + ( + intrinsics.void_ty.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + attributes, + ) + } + } + } + }) + } + + // Marshall wasm stack values into function parameters. + fn args_to_call<'ctx>( + &self, + alloca_builder: &Builder<'ctx>, + func_sig: &FuncSig, + ctx_ptr: PointerValue<'ctx>, + llvm_fn_ty: &FunctionType<'ctx>, + values: &[BasicValueEnum<'ctx>], + ) -> Vec> { + // If it's an sret, allocate the return space. + let sret = if llvm_fn_ty.get_return_type().is_none() && func_sig.results().len() > 1 { + Some( + alloca_builder.build_alloca( + llvm_fn_ty.get_param_types()[0] + .into_pointer_type() + .get_element_type() + .into_struct_type(), + "sret", + ), + ) + } else { + None + }; + + let values = std::iter::once(ctx_ptr.as_basic_value_enum()).chain(values.iter().copied()); + + if let Some(sret) = sret { + std::iter::once(sret.as_basic_value_enum()) + .chain(values) + .collect() + } else { + values.collect() + } + } + + // Given a CallSite, extract the returned values and return them in a Vec. 
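+    // On AArch64 the multi-value cases come back either as a single i64 (two
+    // packed i32s), a [2 x i64] array, or a homogeneous float struct; each is
+    // unpacked back into individual wasm values here.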
+ fn rets_from_call<'ctx>( + &self, + builder: &Builder<'ctx>, + intrinsics: &Intrinsics<'ctx>, + call_site: CallSiteValue<'ctx>, + func_sig: &FuncSig, + ) -> Vec> { + let split_i64 = |value: IntValue<'ctx>| -> (IntValue<'ctx>, IntValue<'ctx>) { + assert!(value.get_type() == intrinsics.i64_ty); + let low = builder.build_int_truncate(value, intrinsics.i32_ty, ""); + let lshr = + builder.build_right_shift(value, intrinsics.i64_ty.const_int(32, false), false, ""); + let high = builder.build_int_truncate(lshr, intrinsics.i32_ty, ""); + (low, high) + }; + + let casted = |value: BasicValueEnum<'ctx>, ty: Type| -> BasicValueEnum<'ctx> { + match ty { + Type::I32 => { + assert!( + value.get_type() == intrinsics.i32_ty.as_basic_type_enum() + || value.get_type() == intrinsics.f32_ty.as_basic_type_enum() + ); + builder.build_bitcast(value, intrinsics.i32_ty, "") + } + Type::F32 => { + assert!( + value.get_type() == intrinsics.i32_ty.as_basic_type_enum() + || value.get_type() == intrinsics.f32_ty.as_basic_type_enum() + ); + builder.build_bitcast(value, intrinsics.f32_ty, "") + } + Type::I64 => { + assert!( + value.get_type() == intrinsics.i64_ty.as_basic_type_enum() + || value.get_type() == intrinsics.f64_ty.as_basic_type_enum() + ); + builder.build_bitcast(value, intrinsics.i64_ty, "") + } + Type::F64 => { + assert!( + value.get_type() == intrinsics.i64_ty.as_basic_type_enum() + || value.get_type() == intrinsics.f64_ty.as_basic_type_enum() + ); + builder.build_bitcast(value, intrinsics.f64_ty, "") + } + Type::V128 => { + assert!(value.get_type() == intrinsics.i128_ty.as_basic_type_enum()); + value + } + Type::ExternRef | Type::FuncRef => { + assert!(value.get_type() == intrinsics.funcref_ty.as_basic_type_enum()); + value + } + } + }; + + if let Some(basic_value) = call_site.try_as_basic_value().left() { + if func_sig.results().len() > 1 { + if basic_value.get_type() == intrinsics.i64_ty.as_basic_type_enum() { + assert!(func_sig.results().len() == 2); + let value = basic_value.into_int_value(); + let (low, high) = split_i64(value); + let low = casted(low.into(), func_sig.results()[0]); + let high = casted(high.into(), func_sig.results()[1]); + return vec![low, high]; + } + if basic_value.is_struct_value() { + let struct_value = basic_value.into_struct_value(); + return (0..struct_value.get_type().count_fields()) + .map(|i| builder.build_extract_value(struct_value, i, "").unwrap()) + .collect::>(); + } + let array_value = basic_value.into_array_value(); + let low = builder + .build_extract_value(array_value, 0, "") + .unwrap() + .into_int_value(); + let high = builder + .build_extract_value(array_value, 1, "") + .unwrap() + .into_int_value(); + let func_sig_returns_bitwidths = func_sig + .results() + .iter() + .map(|ty| match ty { + Type::I32 | Type::F32 => 32, + Type::I64 | Type::F64 => 64, + Type::V128 => 128, + Type::ExternRef | Type::FuncRef => 64, /* pointer */ + }) + .collect::>(); + + match func_sig_returns_bitwidths.as_slice() { + [32, 64] => { + let (low, _) = split_i64(low); + let low = casted(low.into(), func_sig.results()[0]); + let high = casted(high.into(), func_sig.results()[1]); + vec![low, high] + } + [64, 32] => { + let (high, _) = split_i64(high); + let low = casted(low.into(), func_sig.results()[0]); + let high = casted(high.into(), func_sig.results()[1]); + vec![low, high] + } + [64, 64] => { + let low = casted(low.into(), func_sig.results()[0]); + let high = casted(high.into(), func_sig.results()[1]); + vec![low, high] + } + [32, 32, 32] => { + let (v1, v2) = split_i64(low); + let 
(v3, _) = split_i64(high); + let v1 = casted(v1.into(), func_sig.results()[0]); + let v2 = casted(v2.into(), func_sig.results()[1]); + let v3 = casted(v3.into(), func_sig.results()[2]); + vec![v1, v2, v3] + } + [32, 32, 64] => { + let (v1, v2) = split_i64(low); + let v1 = casted(v1.into(), func_sig.results()[0]); + let v2 = casted(v2.into(), func_sig.results()[1]); + let v3 = casted(high.into(), func_sig.results()[2]); + vec![v1, v2, v3] + } + [64, 32, 32] => { + let v1 = casted(low.into(), func_sig.results()[0]); + let (v2, v3) = split_i64(high); + let v2 = casted(v2.into(), func_sig.results()[1]); + let v3 = casted(v3.into(), func_sig.results()[2]); + vec![v1, v2, v3] + } + [32, 32, 32, 32] => { + let (v1, v2) = split_i64(low); + let (v3, v4) = split_i64(high); + let v1 = casted(v1.into(), func_sig.results()[0]); + let v2 = casted(v2.into(), func_sig.results()[1]); + let v3 = casted(v3.into(), func_sig.results()[2]); + let v4 = casted(v4.into(), func_sig.results()[3]); + vec![v1, v2, v3, v4] + } + _ => unreachable!("expected an sret for this type"), + } + } else { + assert!(func_sig.results().len() == 1); + vec![basic_value] + } + } else { + assert!(call_site.count_arguments() > 0); // Either sret or vmctx. + if call_site + .get_enum_attribute( + AttributeLoc::Param(0), + Attribute::get_named_enum_kind_id("sret"), + ) + .is_some() + { + let sret = call_site + .try_as_basic_value() + .right() + .unwrap() + .get_operand(0) + .unwrap() + .left() + .unwrap() + .into_pointer_value(); + let struct_value = builder.build_load(sret, "").into_struct_value(); + let mut rets: Vec<_> = Vec::new(); + for i in 0..struct_value.get_type().count_fields() { + let value = builder.build_extract_value(struct_value, i, "").unwrap(); + rets.push(value); + } + assert!(func_sig.results().len() == rets.len()); + rets + } else { + assert!(func_sig.results().is_empty()); + vec![] + } + } + } + + fn is_sret(&self, func_sig: &FuncSig) -> Result { + let func_sig_returns_bitwidths = func_sig + .results() + .iter() + .map(|ty| match ty { + Type::I32 | Type::F32 => 32, + Type::I64 | Type::F64 => 64, + Type::V128 => 128, + Type::ExternRef | Type::FuncRef => 64, /* pointer */ + }) + .collect::>(); + + Ok(!matches!( + func_sig_returns_bitwidths.as_slice(), + [] | [_] + | [32, 32] + | [32, 64] + | [64, 32] + | [64, 64] + | [32, 32, 32] + | [32, 32, 64] + | [64, 32, 32] + | [32, 32, 32, 32] + )) + } + + fn pack_values_for_register_return<'ctx>( + &self, + intrinsics: &Intrinsics<'ctx>, + builder: &Builder<'ctx>, + values: &[BasicValueEnum<'ctx>], + func_type: &FunctionType<'ctx>, + ) -> Result, CompileError> { + let is_32 = |value: BasicValueEnum| { + (value.is_int_value() && value.into_int_value().get_type() == intrinsics.i32_ty) + || (value.is_float_value() + && value.into_float_value().get_type() == intrinsics.f32_ty) + }; + let is_64 = |value: BasicValueEnum| { + (value.is_int_value() && value.into_int_value().get_type() == intrinsics.i64_ty) + || (value.is_float_value() + && value.into_float_value().get_type() == intrinsics.f64_ty) + }; + + let pack_i32s = |low: BasicValueEnum<'ctx>, high: BasicValueEnum<'ctx>| { + assert!(low.get_type() == intrinsics.i32_ty.as_basic_type_enum()); + assert!(high.get_type() == intrinsics.i32_ty.as_basic_type_enum()); + let (low, high) = (low.into_int_value(), high.into_int_value()); + let low = builder.build_int_z_extend(low, intrinsics.i64_ty, ""); + let high = builder.build_int_z_extend(high, intrinsics.i64_ty, ""); + let high = builder.build_left_shift(high, 
intrinsics.i64_ty.const_int(32, false), ""); + builder.build_or(low, high, "").as_basic_value_enum() + }; + + let to_i64 = |v: BasicValueEnum<'ctx>| { + if v.is_float_value() { + let v = v.into_float_value(); + if v.get_type() == intrinsics.f32_ty { + let v = builder + .build_bitcast(v, intrinsics.i32_ty, "") + .into_int_value(); + let v = builder.build_int_z_extend(v, intrinsics.i64_ty, ""); + v.as_basic_value_enum() + } else { + debug_assert!(v.get_type() == intrinsics.f64_ty); + let v = builder.build_bitcast(v, intrinsics.i64_ty, ""); + v.as_basic_value_enum() + } + } else { + let v = v.into_int_value(); + if v.get_type() == intrinsics.i32_ty { + let v = builder.build_int_z_extend(v, intrinsics.i64_ty, ""); + v.as_basic_value_enum() + } else { + debug_assert!(v.get_type() == intrinsics.i64_ty); + v.as_basic_value_enum() + } + } + }; + + let build_struct = |ty: StructType<'ctx>, values: &[BasicValueEnum<'ctx>]| { + let mut struct_value = ty.get_undef(); + for (i, v) in values.iter().enumerate() { + struct_value = builder + .build_insert_value(struct_value, *v, i as u32, "") + .unwrap() + .into_struct_value(); + } + struct_value.as_basic_value_enum() + }; + + let build_2xi64 = |low: BasicValueEnum<'ctx>, high: BasicValueEnum<'ctx>| { + let low = to_i64(low); + let high = to_i64(high); + let value = intrinsics.i64_ty.array_type(2).get_undef(); + let value = builder.build_insert_value(value, low, 0, "").unwrap(); + let value = builder.build_insert_value(value, high, 1, "").unwrap(); + value.as_basic_value_enum() + }; + + Ok(match *values { + [one_value] => one_value, + [v1, v2] + if v1.is_float_value() + && v2.is_float_value() + && v1.into_float_value().get_type() == v2.into_float_value().get_type() => + { + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1, v2], + ) + } + [v1, v2] if is_32(v1) && is_32(v2) => { + let v1 = builder.build_bitcast(v1, intrinsics.i32_ty, ""); + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + pack_i32s(v1, v2) + } + [v1, v2] => build_2xi64(v1, v2), + [v1, v2, v3] + if is_32(v1) + && is_32(v2) + && is_32(v3) + && v1.is_float_value() + && v2.is_float_value() + && v3.is_float_value() => + { + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1, v2, v3], + ) + } + [v1, v2, v3] if is_32(v1) && is_32(v2) => { + let v1 = builder.build_bitcast(v1, intrinsics.i32_ty, ""); + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + let v1v2_pack = pack_i32s(v1, v2); + build_2xi64(v1v2_pack, v3) + } + [v1, v2, v3] if is_64(v1) && is_32(v2) && is_32(v3) => { + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + let v3 = builder.build_bitcast(v3, intrinsics.i32_ty, ""); + let v2v3_pack = pack_i32s(v2, v3); + build_2xi64(v1, v2v3_pack) + } + [v1, v2, v3, v4] + if is_32(v1) + && is_32(v2) + && is_32(v3) + && is_32(v4) + && v1.is_float_value() + && v2.is_float_value() + && v3.is_float_value() + && v4.is_float_value() => + { + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1, v2, v3, v4], + ) + } + [v1, v2, v3, v4] if is_32(v1) && is_32(v2) && is_32(v3) && is_32(v4) => { + let v1 = builder.build_bitcast(v1, intrinsics.i32_ty, ""); + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + let v1v2_pack = pack_i32s(v1, v2); + let v3 = builder.build_bitcast(v3, intrinsics.i32_ty, ""); + let v4 = builder.build_bitcast(v4, intrinsics.i32_ty, ""); + let v3v4_pack = pack_i32s(v3, v4); + build_2xi64(v1v2_pack, v3v4_pack) + } + _ => { + unreachable!("called to perform 
register return on struct return or void function") + } + }) + } +} diff --git a/lib/compiler-llvm/src/abi/mod.rs b/lib/compiler-llvm/src/abi/mod.rs new file mode 100644 index 0000000000..b79f999874 --- /dev/null +++ b/lib/compiler-llvm/src/abi/mod.rs @@ -0,0 +1,87 @@ +// LLVM implements part of the ABI lowering internally, but also requires that +// the user pack and unpack values themselves sometimes. This can help the LLVM +// optimizer by exposing operations to the optimizer, but it requires that the +// frontend know exactly what IR to produce in order to get the right ABI. + +#![deny(dead_code, missing_docs)] + +use crate::translator::intrinsics::Intrinsics; +use inkwell::{ + attributes::{Attribute, AttributeLoc}, + builder::Builder, + context::Context, + targets::TargetMachine, + types::FunctionType, + values::{BasicValueEnum, CallSiteValue, FunctionValue, PointerValue}, +}; +use wasmer_compiler::CompileError; +use wasmer_types::FunctionType as FuncSig; +use wasmer_vm::VMOffsets; + +mod aarch64_systemv; +mod x86_64_systemv; + +use aarch64_systemv::Aarch64SystemV; +use x86_64_systemv::X86_64SystemV; + +pub fn get_abi(target_machine: &TargetMachine) -> Box { + if target_machine + .get_triple() + .as_str() + .to_string_lossy() + .starts_with("aarch64") + { + Box::new(Aarch64SystemV {}) + } else { + Box::new(X86_64SystemV {}) + } +} + +/// We need to produce different LLVM IR for different platforms. (Contrary to +/// popular knowledge LLVM IR is not intended to be portable in that way.) This +/// trait deals with differences between function signatures on different +/// targets. +pub trait Abi { + /// Given a function definition, retrieve the parameter that is the vmctx pointer. + fn get_vmctx_ptr_param<'ctx>(&self, func_value: &FunctionValue<'ctx>) -> PointerValue<'ctx>; + + /// Given a wasm function type, produce an llvm function declaration. + fn func_type_to_llvm<'ctx>( + &self, + context: &'ctx Context, + intrinsics: &Intrinsics<'ctx>, + offsets: Option<&VMOffsets>, + sig: &FuncSig, + ) -> Result<(FunctionType<'ctx>, Vec<(Attribute, AttributeLoc)>), CompileError>; + + /// Marshall wasm stack values into function parameters. + fn args_to_call<'ctx>( + &self, + alloca_builder: &Builder<'ctx>, + func_sig: &FuncSig, + ctx_ptr: PointerValue<'ctx>, + llvm_fn_ty: &FunctionType<'ctx>, + values: &[BasicValueEnum<'ctx>], + ) -> Vec>; + + /// Given a CallSite, extract the returned values and return them in a Vec. + fn rets_from_call<'ctx>( + &self, + builder: &Builder<'ctx>, + intrinsics: &Intrinsics<'ctx>, + call_site: CallSiteValue<'ctx>, + func_sig: &FuncSig, + ) -> Vec>; + + /// Whether the llvm equivalent of this wasm function has an `sret` attribute. + fn is_sret(&self, func_sig: &FuncSig) -> Result; + + /// Pack LLVM IR values representing individual wasm values into the return type for the function. 
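+    /// This is the callee-side inverse of `rets_from_call`: it packs values
+    /// when emitting a generated function's own return.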
+ fn pack_values_for_register_return<'ctx>( + &self, + intrinsics: &Intrinsics<'ctx>, + builder: &Builder<'ctx>, + values: &[BasicValueEnum<'ctx>], + func_type: &FunctionType<'ctx>, + ) -> Result, CompileError>; +} diff --git a/lib/compiler-llvm/src/abi/x86_64_systemv.rs b/lib/compiler-llvm/src/abi/x86_64_systemv.rs new file mode 100644 index 0000000000..d9513d07a4 --- /dev/null +++ b/lib/compiler-llvm/src/abi/x86_64_systemv.rs @@ -0,0 +1,655 @@ +use crate::abi::Abi; +use crate::translator::intrinsics::{type_to_llvm, Intrinsics}; +use inkwell::{ + attributes::{Attribute, AttributeLoc}, + builder::Builder, + context::Context, + types::{AnyType, BasicMetadataTypeEnum, BasicType, FunctionType, StructType}, + values::{ + BasicValue, BasicValueEnum, CallSiteValue, FloatValue, FunctionValue, IntValue, + PointerValue, VectorValue, + }, + AddressSpace, +}; +use wasmer_compiler::CompileError; +use wasmer_types::{FunctionType as FuncSig, Type}; +use wasmer_vm::VMOffsets; + +use std::convert::TryInto; + +/// Implementation of the [`Abi`] trait for the AMD64 SystemV ABI. +pub struct X86_64SystemV {} + +impl Abi for X86_64SystemV { + // Given a function definition, retrieve the parameter that is the vmctx pointer. + fn get_vmctx_ptr_param<'ctx>(&self, func_value: &FunctionValue<'ctx>) -> PointerValue<'ctx> { + func_value + .get_nth_param( + if func_value + .get_enum_attribute( + AttributeLoc::Param(0), + Attribute::get_named_enum_kind_id("sret"), + ) + .is_some() + { + 1 + } else { + 0 + }, + ) + .unwrap() + .into_pointer_value() + } + + // Given a wasm function type, produce an llvm function declaration. + fn func_type_to_llvm<'ctx>( + &self, + context: &'ctx Context, + intrinsics: &Intrinsics<'ctx>, + offsets: Option<&VMOffsets>, + sig: &FuncSig, + ) -> Result<(FunctionType<'ctx>, Vec<(Attribute, AttributeLoc)>), CompileError> { + let user_param_types = sig.params().iter().map(|&ty| type_to_llvm(intrinsics, ty)); + + let param_types = + std::iter::once(Ok(intrinsics.ctx_ptr_ty.as_basic_type_enum())).chain(user_param_types); + + // TODO: figure out how many bytes long vmctx is, and mark it dereferenceable. (no need to mark it nonnull once we do this.) + let vmctx_attributes = |i: u32| { + vec![ + ( + context.create_enum_attribute(Attribute::get_named_enum_kind_id("nofree"), 0), + AttributeLoc::Param(i), + ), + ( + if let Some(offsets) = offsets { + context.create_enum_attribute( + Attribute::get_named_enum_kind_id("dereferenceable"), + offsets.size_of_vmctx().into(), + ) + } else { + context + .create_enum_attribute(Attribute::get_named_enum_kind_id("nonnull"), 0) + }, + AttributeLoc::Param(i), + ), + ( + context.create_enum_attribute( + Attribute::get_named_enum_kind_id("align"), + std::mem::align_of::() + .try_into() + .unwrap(), + ), + AttributeLoc::Param(i), + ), + ] + }; + + let sig_returns_bitwidths = sig + .results() + .iter() + .map(|ty| match ty { + Type::I32 | Type::F32 => 32, + Type::I64 | Type::F64 => 64, + Type::V128 => 128, + Type::ExternRef | Type::FuncRef => 64, /* pointer */ + }) + .collect::>(); + + Ok(match sig_returns_bitwidths.as_slice() { + [] => ( + intrinsics.void_ty.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [_] => { + let single_value = sig.results()[0]; + ( + type_to_llvm(intrinsics, single_value)?.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? 
+ .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + [32, 64] | [64, 32] | [64, 64] => { + let basic_types: Vec<_> = sig + .results() + .iter() + .map(|&ty| type_to_llvm(intrinsics, ty)) + .collect::>()?; + + ( + context.struct_type(&basic_types, false).fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ) + } + [32, 32] if sig.results()[0] == Type::F32 && sig.results()[1] == Type::F32 => ( + intrinsics.f32_ty.vec_type(2).fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [32, 32] => ( + intrinsics.i64_ty.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [32, 32, _] if sig.results()[0] == Type::F32 && sig.results()[1] == Type::F32 => ( + context + .struct_type( + &[ + intrinsics.f32_ty.vec_type(2).as_basic_type_enum(), + type_to_llvm(intrinsics, sig.results()[2])?, + ], + false, + ) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [32, 32, _] => ( + context + .struct_type( + &[ + intrinsics.i64_ty.as_basic_type_enum(), + type_to_llvm(intrinsics, sig.results()[2])?, + ], + false, + ) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [64, 32, 32] if sig.results()[1] == Type::F32 && sig.results()[2] == Type::F32 => ( + context + .struct_type( + &[ + type_to_llvm(intrinsics, sig.results()[0])?, + intrinsics.f32_ty.vec_type(2).as_basic_type_enum(), + ], + false, + ) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [64, 32, 32] => ( + context + .struct_type( + &[ + type_to_llvm(intrinsics, sig.results()[0])?, + intrinsics.i64_ty.as_basic_type_enum(), + ], + false, + ) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + [32, 32, 32, 32] => ( + context + .struct_type( + &[ + if sig.results()[0] == Type::F32 && sig.results()[1] == Type::F32 { + intrinsics.f32_ty.vec_type(2).as_basic_type_enum() + } else { + intrinsics.i64_ty.as_basic_type_enum() + }, + if sig.results()[2] == Type::F32 && sig.results()[3] == Type::F32 { + intrinsics.f32_ty.vec_type(2).as_basic_type_enum() + } else { + intrinsics.i64_ty.as_basic_type_enum() + }, + ], + false, + ) + .fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + vmctx_attributes(0), + ), + _ => { + let basic_types: Vec<_> = sig + .results() + .iter() + .map(|&ty| type_to_llvm(intrinsics, ty)) + .collect::>()?; + + let sret = context.struct_type(&basic_types, false); + let sret_ptr = sret.ptr_type(AddressSpace::Generic); + + let param_types = + std::iter::once(Ok(sret_ptr.as_basic_type_enum())).chain(param_types); + + let mut attributes = vec![( + context.create_type_attribute( + Attribute::get_named_enum_kind_id("sret"), + sret.as_any_type_enum(), + ), + AttributeLoc::Param(0), + )]; + attributes.append(&mut vmctx_attributes(1)); + + ( + intrinsics.void_ty.fn_type( + param_types + .map(|v| v.map(Into::into)) + .collect::, _>>()? + .as_slice(), + false, + ), + attributes, + ) + } + }) + } + + // Marshall wasm stack values into function parameters. 
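+    // For sret signatures this also allocates the return-value struct up
+    // front and passes its address as the hidden first argument.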
+ fn args_to_call<'ctx>( + &self, + alloca_builder: &Builder<'ctx>, + func_sig: &FuncSig, + ctx_ptr: PointerValue<'ctx>, + llvm_fn_ty: &FunctionType<'ctx>, + values: &[BasicValueEnum<'ctx>], + ) -> Vec> { + // If it's an sret, allocate the return space. + let sret = if llvm_fn_ty.get_return_type().is_none() && func_sig.results().len() > 1 { + Some( + alloca_builder.build_alloca( + llvm_fn_ty.get_param_types()[0] + .into_pointer_type() + .get_element_type() + .into_struct_type(), + "sret", + ), + ) + } else { + None + }; + + let values = std::iter::once(ctx_ptr.as_basic_value_enum()).chain(values.iter().copied()); + + if let Some(sret) = sret { + std::iter::once(sret.as_basic_value_enum()) + .chain(values) + .collect() + } else { + values.collect() + } + } + + // Given a CallSite, extract the returned values and return them in a Vec. + fn rets_from_call<'ctx>( + &self, + builder: &Builder<'ctx>, + intrinsics: &Intrinsics<'ctx>, + call_site: CallSiteValue<'ctx>, + func_sig: &FuncSig, + ) -> Vec> { + let split_i64 = |value: IntValue<'ctx>| -> (IntValue<'ctx>, IntValue<'ctx>) { + assert!(value.get_type() == intrinsics.i64_ty); + let low = builder.build_int_truncate(value, intrinsics.i32_ty, ""); + let lshr = + builder.build_right_shift(value, intrinsics.i64_ty.const_int(32, false), false, ""); + let high = builder.build_int_truncate(lshr, intrinsics.i32_ty, ""); + (low, high) + }; + + let f32x2_ty = intrinsics.f32_ty.vec_type(2).as_basic_type_enum(); + let extract_f32x2 = |value: VectorValue<'ctx>| -> (FloatValue<'ctx>, FloatValue<'ctx>) { + assert!(value.get_type() == f32x2_ty.into_vector_type()); + let ret0 = builder + .build_extract_element(value, intrinsics.i32_ty.const_int(0, false), "") + .into_float_value(); + let ret1 = builder + .build_extract_element(value, intrinsics.i32_ty.const_int(1, false), "") + .into_float_value(); + (ret0, ret1) + }; + + let casted = |value: BasicValueEnum<'ctx>, ty: Type| -> BasicValueEnum<'ctx> { + match ty { + Type::I32 => { + assert!( + value.get_type() == intrinsics.i32_ty.as_basic_type_enum() + || value.get_type() == intrinsics.f32_ty.as_basic_type_enum() + ); + builder.build_bitcast(value, intrinsics.i32_ty, "") + } + Type::F32 => { + assert!( + value.get_type() == intrinsics.i32_ty.as_basic_type_enum() + || value.get_type() == intrinsics.f32_ty.as_basic_type_enum() + ); + builder.build_bitcast(value, intrinsics.f32_ty, "") + } + _ => panic!("should only be called to repack 32-bit values"), + } + }; + + if let Some(basic_value) = call_site.try_as_basic_value().left() { + if func_sig.results().len() > 1 { + if basic_value.get_type() == intrinsics.i64_ty.as_basic_type_enum() { + assert!(func_sig.results().len() == 2); + let value = basic_value.into_int_value(); + let (low, high) = split_i64(value); + let low = casted(low.into(), func_sig.results()[0]); + let high = casted(high.into(), func_sig.results()[1]); + return vec![low, high]; + } + if basic_value.get_type() == f32x2_ty { + assert!(func_sig.results().len() == 2); + let (ret0, ret1) = extract_f32x2(basic_value.into_vector_value()); + return vec![ret0.into(), ret1.into()]; + } + let struct_value = basic_value.into_struct_value(); + let rets = (0..struct_value.get_type().count_fields()) + .map(|i| builder.build_extract_value(struct_value, i, "").unwrap()) + .collect::>(); + let func_sig_returns_bitwidths = func_sig + .results() + .iter() + .map(|ty| match ty { + Type::I32 | Type::F32 => 32, + Type::I64 | Type::F64 => 64, + Type::V128 => 128, + Type::ExternRef | Type::FuncRef => 64, /* pointer */ + }) 
+ .collect::>(); + + match func_sig_returns_bitwidths.as_slice() { + [32, 64] | [64, 32] | [64, 64] => { + assert!(func_sig.results().len() == 2); + vec![rets[0], rets[1]] + } + [32, 32, _] + if rets[0].get_type() + == intrinsics.f32_ty.vec_type(2).as_basic_type_enum() => + { + assert!(func_sig.results().len() == 3); + let (rets0, rets1) = extract_f32x2(rets[0].into_vector_value()); + vec![rets0.into(), rets1.into(), rets[1]] + } + [32, 32, _] => { + assert!(func_sig.results().len() == 3); + let (low, high) = split_i64(rets[0].into_int_value()); + let low = casted(low.into(), func_sig.results()[0]); + let high = casted(high.into(), func_sig.results()[1]); + vec![low, high, rets[1]] + } + [64, 32, 32] + if rets[1].get_type() + == intrinsics.f32_ty.vec_type(2).as_basic_type_enum() => + { + assert!(func_sig.results().len() == 3); + let (rets1, rets2) = extract_f32x2(rets[1].into_vector_value()); + vec![rets[0], rets1.into(), rets2.into()] + } + [64, 32, 32] => { + assert!(func_sig.results().len() == 3); + let (rets1, rets2) = split_i64(rets[1].into_int_value()); + let rets1 = casted(rets1.into(), func_sig.results()[1]); + let rets2 = casted(rets2.into(), func_sig.results()[2]); + vec![rets[0], rets1, rets2] + } + [32, 32, 32, 32] => { + assert!(func_sig.results().len() == 4); + let (low0, high0) = if rets[0].get_type() + == intrinsics.f32_ty.vec_type(2).as_basic_type_enum() + { + let (x, y) = extract_f32x2(rets[0].into_vector_value()); + (x.into(), y.into()) + } else { + let (x, y) = split_i64(rets[0].into_int_value()); + (x.into(), y.into()) + }; + let (low1, high1) = if rets[1].get_type() + == intrinsics.f32_ty.vec_type(2).as_basic_type_enum() + { + let (x, y) = extract_f32x2(rets[1].into_vector_value()); + (x.into(), y.into()) + } else { + let (x, y) = split_i64(rets[1].into_int_value()); + (x.into(), y.into()) + }; + let low0 = casted(low0, func_sig.results()[0]); + let high0 = casted(high0, func_sig.results()[1]); + let low1 = casted(low1, func_sig.results()[2]); + let high1 = casted(high1, func_sig.results()[3]); + vec![low0, high0, low1, high1] + } + _ => unreachable!("expected an sret for this type"), + } + } else { + assert!(func_sig.results().len() == 1); + vec![basic_value] + } + } else { + assert!(call_site.count_arguments() > 0); // Either sret or vmctx. 
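+            // If the callee used sret, the results live in the caller-allocated
+            // struct whose address was passed as the first argument; load and
+            // unpack it field by field below.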
+ if call_site + .get_enum_attribute( + AttributeLoc::Param(0), + Attribute::get_named_enum_kind_id("sret"), + ) + .is_some() + { + let sret = call_site + .try_as_basic_value() + .right() + .unwrap() + .get_operand(0) + .unwrap() + .left() + .unwrap() + .into_pointer_value(); + let struct_value = builder.build_load(sret, "").into_struct_value(); + let mut rets: Vec<_> = Vec::new(); + for i in 0..struct_value.get_type().count_fields() { + let value = builder.build_extract_value(struct_value, i, "").unwrap(); + rets.push(value); + } + assert!(func_sig.results().len() == rets.len()); + rets + } else { + assert!(func_sig.results().is_empty()); + vec![] + } + } + } + + fn is_sret(&self, func_sig: &FuncSig) -> Result { + let func_sig_returns_bitwidths = func_sig + .results() + .iter() + .map(|ty| match ty { + Type::I32 | Type::F32 => 32, + Type::I64 | Type::F64 => 64, + Type::V128 => 128, + Type::ExternRef | Type::FuncRef => 64, /* pointer */ + }) + .collect::>(); + + Ok(!matches!( + func_sig_returns_bitwidths.as_slice(), + [] | [_] + | [32, 32] + | [32, 64] + | [64, 32] + | [64, 64] + | [32, 32, 32] + | [32, 32, 64] + | [64, 32, 32] + | [32, 32, 32, 32] + )) + } + + fn pack_values_for_register_return<'ctx>( + &self, + intrinsics: &Intrinsics<'ctx>, + builder: &Builder<'ctx>, + values: &[BasicValueEnum<'ctx>], + func_type: &FunctionType<'ctx>, + ) -> Result, CompileError> { + let is_32 = |value: BasicValueEnum| { + (value.is_int_value() && value.into_int_value().get_type() == intrinsics.i32_ty) + || (value.is_float_value() + && value.into_float_value().get_type() == intrinsics.f32_ty) + }; + let is_64 = |value: BasicValueEnum| { + (value.is_int_value() && value.into_int_value().get_type() == intrinsics.i64_ty) + || (value.is_float_value() + && value.into_float_value().get_type() == intrinsics.f64_ty) + }; + let is_f32 = |value: BasicValueEnum| { + value.is_float_value() && value.into_float_value().get_type() == intrinsics.f32_ty + }; + + let pack_i32s = |low: BasicValueEnum<'ctx>, high: BasicValueEnum<'ctx>| { + assert!(low.get_type() == intrinsics.i32_ty.as_basic_type_enum()); + assert!(high.get_type() == intrinsics.i32_ty.as_basic_type_enum()); + let (low, high) = (low.into_int_value(), high.into_int_value()); + let low = builder.build_int_z_extend(low, intrinsics.i64_ty, ""); + let high = builder.build_int_z_extend(high, intrinsics.i64_ty, ""); + let high = builder.build_left_shift(high, intrinsics.i64_ty.const_int(32, false), ""); + builder.build_or(low, high, "").as_basic_value_enum() + }; + + let pack_f32s = |first: BasicValueEnum<'ctx>, + second: BasicValueEnum<'ctx>| + -> BasicValueEnum<'ctx> { + assert!(first.get_type() == intrinsics.f32_ty.as_basic_type_enum()); + assert!(second.get_type() == intrinsics.f32_ty.as_basic_type_enum()); + let (first, second) = (first.into_float_value(), second.into_float_value()); + let vec_ty = intrinsics.f32_ty.vec_type(2); + let vec = + builder.build_insert_element(vec_ty.get_undef(), first, intrinsics.i32_zero, ""); + builder + .build_insert_element(vec, second, intrinsics.i32_ty.const_int(1, false), "") + .as_basic_value_enum() + }; + + let build_struct = |ty: StructType<'ctx>, values: &[BasicValueEnum<'ctx>]| { + let mut struct_value = ty.get_undef(); + for (i, v) in values.iter().enumerate() { + struct_value = builder + .build_insert_value(struct_value, *v, i as u32, "") + .unwrap() + .into_struct_value(); + } + struct_value.as_basic_value_enum() + }; + + Ok(match *values { + [one_value] => one_value, + [v1, v2] if is_f32(v1) && is_f32(v2) => 
pack_f32s(v1, v2), + [v1, v2] if is_32(v1) && is_32(v2) => { + let v1 = builder.build_bitcast(v1, intrinsics.i32_ty, ""); + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + pack_i32s(v1, v2) + } + [v1, v2] => { + assert!(!(is_32(v1) && is_32(v2))); + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1, v2], + ) + } + [v1, v2, v3] if is_f32(v1) && is_f32(v2) => build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[pack_f32s(v1, v2), v3], + ), + [v1, v2, v3] if is_32(v1) && is_32(v2) => { + let v1 = builder.build_bitcast(v1, intrinsics.i32_ty, ""); + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[pack_i32s(v1, v2), v3], + ) + } + [v1, v2, v3] if is_64(v1) && is_f32(v2) && is_f32(v3) => build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1, pack_f32s(v2, v3)], + ), + [v1, v2, v3] if is_64(v1) && is_32(v2) && is_32(v3) => { + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + let v3 = builder.build_bitcast(v3, intrinsics.i32_ty, ""); + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1, pack_i32s(v2, v3)], + ) + } + [v1, v2, v3, v4] if is_32(v1) && is_32(v2) && is_32(v3) && is_32(v4) => { + let v1v2_pack = if is_f32(v1) && is_f32(v2) { + pack_f32s(v1, v2) + } else { + let v1 = builder.build_bitcast(v1, intrinsics.i32_ty, ""); + let v2 = builder.build_bitcast(v2, intrinsics.i32_ty, ""); + pack_i32s(v1, v2) + }; + let v3v4_pack = if is_f32(v3) && is_f32(v4) { + pack_f32s(v3, v4) + } else { + let v3 = builder.build_bitcast(v3, intrinsics.i32_ty, ""); + let v4 = builder.build_bitcast(v4, intrinsics.i32_ty, ""); + pack_i32s(v3, v4) + }; + build_struct( + func_type.get_return_type().unwrap().into_struct_type(), + &[v1v2_pack, v3v4_pack], + ) + } + _ => { + unreachable!("called to perform register return on struct return or void function") + } + }) + } +} diff --git a/lib/compiler-llvm/src/compiler.rs b/lib/compiler-llvm/src/compiler.rs new file mode 100644 index 0000000000..48136de807 --- /dev/null +++ b/lib/compiler-llvm/src/compiler.rs @@ -0,0 +1,396 @@ +use crate::config::LLVM; +use crate::trampoline::FuncTrampoline; +use crate::translator::FuncTranslator; +use crate::CompiledKind; +use inkwell::context::Context; +use inkwell::memory_buffer::MemoryBuffer; +use inkwell::module::{Linkage, Module}; +use inkwell::targets::FileType; +use inkwell::DLLStorageClass; +use rayon::iter::ParallelBridge; +use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; +use wasmer_compiler::{ + Architecture, Compilation, CompileError, CompileModuleInfo, Compiler, CustomSection, + CustomSectionProtection, Dwarf, FunctionBodyData, ModuleTranslationState, RelocationTarget, + SectionBody, SectionIndex, Symbol, SymbolRegistry, Target, TrampolinesSection, +}; +use wasmer_types::entity::{EntityRef, PrimaryMap}; +use wasmer_types::{FunctionIndex, LocalFunctionIndex, SignatureIndex}; + +//use std::sync::Mutex; + +/// A compiler that compiles a WebAssembly module with LLVM, translating the Wasm to LLVM IR, +/// optimizing it and then translating to assembly. 
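+/// Relocations and unwind information are recovered afterwards by parsing the
+/// object file that LLVM emits for each function.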
+pub struct LLVMCompiler {
+    config: LLVM,
+}
+
+impl LLVMCompiler {
+    /// Creates a new LLVM compiler
+    pub fn new(config: LLVM) -> LLVMCompiler {
+        LLVMCompiler { config }
+    }
+
+    /// Gets the config for this Compiler
+    fn config(&self) -> &LLVM {
+        &self.config
+    }
+}
+
+struct ShortNames {}
+
+impl SymbolRegistry for ShortNames {
+    fn symbol_to_name(&self, symbol: Symbol) -> String {
+        match symbol {
+            Symbol::LocalFunction(index) => format!("f{}", index.index()),
+            Symbol::Section(index) => format!("s{}", index.index()),
+            Symbol::FunctionCallTrampoline(index) => format!("t{}", index.index()),
+            Symbol::DynamicFunctionTrampoline(index) => format!("d{}", index.index()),
+        }
+    }
+
+    fn name_to_symbol(&self, name: &str) -> Option<Symbol> {
+        if name.len() < 2 {
+            return None;
+        }
+        let (ty, idx) = name.split_at(1);
+        let idx = idx.parse::<u32>().ok()?;
+        match ty.chars().next().unwrap() {
+            'f' => Some(Symbol::LocalFunction(LocalFunctionIndex::from_u32(idx))),
+            's' => Some(Symbol::Section(SectionIndex::from_u32(idx))),
+            't' => Some(Symbol::FunctionCallTrampoline(SignatureIndex::from_u32(
+                idx,
+            ))),
+            'd' => Some(Symbol::DynamicFunctionTrampoline(FunctionIndex::from_u32(
+                idx,
+            ))),
+            _ => None,
+        }
+    }
+}
+
+impl LLVMCompiler {
+    fn compile_native_object<'data, 'module>(
+        &self,
+        target: &Target,
+        compile_info: &'module CompileModuleInfo,
+        module_translation: &ModuleTranslationState,
+        function_body_inputs: &PrimaryMap<LocalFunctionIndex, FunctionBodyData<'data>>,
+        symbol_registry: &dyn SymbolRegistry,
+        wasmer_metadata: &[u8],
+    ) -> Result<Vec<u8>, CompileError> {
+        let target_machine = self.config().target_machine(target);
+        let ctx = Context::create();
+
+        // TODO: https://github.com/rayon-rs/rayon/issues/822
+
+        let merged_bitcode = function_body_inputs.into_iter().par_bridge().map_init(
+            || {
+                let target_machine = self.config().target_machine(target);
+                FuncTranslator::new(target_machine)
+            },
+            |func_translator, (i, input)| {
+                let module = func_translator.translate_to_module(
+                    &compile_info.module,
+                    module_translation,
+                    &i,
+                    input,
+                    self.config(),
+                    &compile_info.memory_styles,
+                    &compile_info.table_styles,
+                    symbol_registry,
+                )?;
+                Ok(module.write_bitcode_to_memory().as_slice().to_vec())
+            },
+        );
+
+        let trampolines_bitcode = compile_info.module.signatures.iter().par_bridge().map_init(
+            || {
+                let target_machine = self.config().target_machine(target);
+                FuncTrampoline::new(target_machine)
+            },
+            |func_trampoline, (i, sig)| {
+                let name = symbol_registry.symbol_to_name(Symbol::FunctionCallTrampoline(i));
+                let module = func_trampoline.trampoline_to_module(sig, self.config(), &name)?;
+                Ok(module.write_bitcode_to_memory().as_slice().to_vec())
+            },
+        );
+
+        let dynamic_trampolines_bitcode =
+            compile_info.module.functions.iter().par_bridge().map_init(
+                || {
+                    let target_machine = self.config().target_machine(target);
+                    (
+                        FuncTrampoline::new(target_machine),
+                        &compile_info.module.signatures,
+                    )
+                },
+                |(func_trampoline, signatures), (i, sig)| {
+                    let sig = &signatures[*sig];
+                    let name = symbol_registry.symbol_to_name(Symbol::DynamicFunctionTrampoline(i));
+                    let module =
+                        func_trampoline.dynamic_trampoline_to_module(sig, self.config(), &name)?;
+                    Ok(module.write_bitcode_to_memory().as_slice().to_vec())
+                },
+            );
+
+        let merged_bitcode = merged_bitcode
+            .chain(trampolines_bitcode)
+            .chain(dynamic_trampolines_bitcode)
+            .collect::<Result<Vec<_>, CompileError>>()?
+ .into_par_iter() + .reduce_with(|bc1, bc2| { + let ctx = Context::create(); + let membuf = MemoryBuffer::create_from_memory_range(&bc1, ""); + let m1 = Module::parse_bitcode_from_buffer(&membuf, &ctx).unwrap(); + let membuf = MemoryBuffer::create_from_memory_range(&bc2, ""); + let m2 = Module::parse_bitcode_from_buffer(&membuf, &ctx).unwrap(); + m1.link_in_module(m2).unwrap(); + m1.write_bitcode_to_memory().as_slice().to_vec() + }); + let merged_module = if let Some(bc) = merged_bitcode { + let membuf = MemoryBuffer::create_from_memory_range(&bc, ""); + Module::parse_bitcode_from_buffer(&membuf, &ctx).unwrap() + } else { + ctx.create_module("") + }; + + let i8_ty = ctx.i8_type(); + let metadata_init = i8_ty.const_array( + wasmer_metadata + .iter() + .map(|v| i8_ty.const_int(*v as u64, false)) + .collect::>() + .as_slice(), + ); + let metadata_gv = + merged_module.add_global(metadata_init.get_type(), None, "WASMER_METADATA"); + metadata_gv.set_initializer(&metadata_init); + metadata_gv.set_linkage(Linkage::DLLExport); + metadata_gv.set_dll_storage_class(DLLStorageClass::Export); + + if self.config().enable_verifier { + merged_module.verify().unwrap(); + } + + let memory_buffer = target_machine + .write_to_memory_buffer(&merged_module, FileType::Object) + .unwrap(); + if let Some(ref callbacks) = self.config.callbacks { + callbacks.obj_memory_buffer(&CompiledKind::Module, &memory_buffer); + } + + Ok(memory_buffer.as_slice().to_vec()) + } +} + +impl Compiler for LLVMCompiler { + fn experimental_native_compile_module<'data, 'module>( + &self, + target: &Target, + compile_info: &'module CompileModuleInfo, + module_translation: &ModuleTranslationState, + // The list of function bodies + function_body_inputs: &PrimaryMap>, + symbol_registry: &dyn SymbolRegistry, + // The metadata to inject into the wasmer_metadata section of the object file. + wasmer_metadata: &[u8], + ) -> Option, CompileError>> { + Some(self.compile_native_object( + target, + compile_info, + module_translation, + function_body_inputs, + symbol_registry, + wasmer_metadata, + )) + } + + /// Compile the module using LLVM, producing a compilation result with + /// associated relocations. + fn compile_module<'data, 'module>( + &self, + target: &Target, + compile_info: &'module CompileModuleInfo, + module_translation: &ModuleTranslationState, + function_body_inputs: PrimaryMap>, + ) -> Result { + //let data = Arc::new(Mutex::new(0)); + let memory_styles = &compile_info.memory_styles; + let table_styles = &compile_info.table_styles; + + let module = &compile_info.module; + + // TODO: merge constants in sections. + + let mut module_custom_sections = PrimaryMap::new(); + let mut frame_section_bytes = vec![]; + let mut frame_section_relocations = vec![]; + let functions = function_body_inputs + .iter() + .collect::)>>() + .par_iter() + .map_init( + || { + let target_machine = self.config().target_machine(target); + FuncTranslator::new(target_machine) + }, + |func_translator, (i, input)| { + // TODO: remove (to serialize) + //let _data = data.lock().unwrap(); + func_translator.translate( + module, + module_translation, + i, + input, + self.config(), + memory_styles, + &table_styles, + &ShortNames {}, + ) + }, + ) + .collect::, CompileError>>()? 
+ .into_iter() + .map(|mut compiled_function| { + let first_section = module_custom_sections.len() as u32; + for (section_index, custom_section) in compiled_function.custom_sections.iter() { + // TODO: remove this call to clone() + let mut custom_section = custom_section.clone(); + for mut reloc in &mut custom_section.relocations { + if let RelocationTarget::CustomSection(index) = reloc.reloc_target { + reloc.reloc_target = RelocationTarget::CustomSection( + SectionIndex::from_u32(first_section + index.as_u32()), + ) + } + } + if compiled_function + .eh_frame_section_indices + .contains(§ion_index) + { + let offset = frame_section_bytes.len() as u32; + for mut reloc in &mut custom_section.relocations { + reloc.offset += offset; + } + frame_section_bytes.extend_from_slice(custom_section.bytes.as_slice()); + frame_section_relocations.extend(custom_section.relocations); + // TODO: we do this to keep the count right, remove it. + module_custom_sections.push(CustomSection { + protection: CustomSectionProtection::Read, + bytes: SectionBody::new_with_vec(vec![]), + relocations: vec![], + }); + } else { + module_custom_sections.push(custom_section); + } + } + for mut reloc in &mut compiled_function.compiled_function.relocations { + if let RelocationTarget::CustomSection(index) = reloc.reloc_target { + reloc.reloc_target = RelocationTarget::CustomSection( + SectionIndex::from_u32(first_section + index.as_u32()), + ) + } + } + compiled_function.compiled_function + }) + .collect::>(); + + let trampolines = match target.triple().architecture { + Architecture::Aarch64(_) => { + let nj = 16; + // We create a jump to an absolute 64bits address + // using x17 as a scratch register, SystemV declare both x16 and x17 as Intra-Procedural scratch register + // but Apple ask to just not use x16 + // LDR x17, #8 51 00 00 58 + // BR x17 20 02 1f d6 + // JMPADDR 00 00 00 00 00 00 00 00 + let onejump = [ + 0x51, 0x00, 0x00, 0x58, 0x20, 0x02, 0x1f, 0xd6, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let trampolines = Some(TrampolinesSection::new( + SectionIndex::from_u32(module_custom_sections.len() as u32), + nj, + onejump.len(), + )); + let mut alljmps = vec![]; + for _ in 0..nj { + alljmps.extend(onejump.iter().copied()); + } + module_custom_sections.push(CustomSection { + protection: CustomSectionProtection::ReadExecute, + bytes: SectionBody::new_with_vec(alljmps), + relocations: vec![], + }); + trampolines + } + _ => None, + }; + + let dwarf = if !frame_section_bytes.is_empty() { + let dwarf = Some(Dwarf::new(SectionIndex::from_u32( + module_custom_sections.len() as u32, + ))); + // Terminating zero-length CIE. 
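+            // `.eh_frame` readers stop at an entry whose length field is zero,
+            // so this terminator must come after all of the FDEs appended above.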
+ frame_section_bytes.extend(vec![ + 0x00, 0x00, 0x00, 0x00, // Length + 0x00, 0x00, 0x00, 0x00, // CIE ID + 0x10, // Version (must be 1) + 0x00, // Augmentation data + 0x00, // Code alignment factor + 0x00, // Data alignment factor + 0x00, // Return address register + 0x00, 0x00, 0x00, // Padding to a multiple of 4 bytes + ]); + module_custom_sections.push(CustomSection { + protection: CustomSectionProtection::Read, + bytes: SectionBody::new_with_vec(frame_section_bytes), + relocations: frame_section_relocations, + }); + dwarf + } else { + None + }; + + let function_call_trampolines = module + .signatures + .values() + .collect::>() + .par_iter() + .map_init( + || { + let target_machine = self.config().target_machine(target); + FuncTrampoline::new(target_machine) + }, + |func_trampoline, sig| func_trampoline.trampoline(sig, self.config(), ""), + ) + .collect::>() + .into_iter() + .collect::, CompileError>>()?; + + let dynamic_function_trampolines = module + .imported_function_types() + .collect::>() + .par_iter() + .map_init( + || { + let target_machine = self.config().target_machine(target); + FuncTrampoline::new(target_machine) + }, + |func_trampoline, func_type| { + func_trampoline.dynamic_trampoline(&func_type, self.config(), "") + }, + ) + .collect::, CompileError>>()? + .into_iter() + .collect::>(); + + Ok(Compilation::new( + functions, + module_custom_sections, + function_call_trampolines, + dynamic_function_trampolines, + dwarf, + trampolines, + )) + } +} diff --git a/lib/compiler-llvm/src/config.rs b/lib/compiler-llvm/src/config.rs new file mode 100644 index 0000000000..596e0fc018 --- /dev/null +++ b/lib/compiler-llvm/src/config.rs @@ -0,0 +1,220 @@ +use crate::compiler::LLVMCompiler; +use inkwell::targets::{ + CodeModel, InitializationConfig, RelocMode, Target as InkwellTarget, TargetMachine, + TargetTriple, +}; +pub use inkwell::OptimizationLevel as LLVMOptLevel; +use itertools::Itertools; +use std::fmt::Debug; +use std::sync::Arc; +use target_lexicon::Architecture; +use wasmer_compiler::{Compiler, CompilerConfig, Target, Triple}; +use wasmer_types::{FunctionType, LocalFunctionIndex}; + +/// The InkWell ModuleInfo type +pub type InkwellModule<'ctx> = inkwell::module::Module<'ctx>; + +/// The InkWell MemoryBuffer type +pub type InkwellMemoryBuffer = inkwell::memory_buffer::MemoryBuffer; + +/// The compiled function kind, used for debugging in the `LLVMCallbacks`. +#[derive(Debug, Clone)] +pub enum CompiledKind { + // A locally-defined function in the Wasm file. + Local(LocalFunctionIndex), + // A function call trampoline for a given signature. + FunctionCallTrampoline(FunctionType), + // A dynamic function trampoline for a given signature. + DynamicFunctionTrampoline(FunctionType), + // An entire Wasm module. + Module, +} + +/// Callbacks to the different LLVM compilation phases. +pub trait LLVMCallbacks: Debug + Send + Sync { + fn preopt_ir(&self, function: &CompiledKind, module: &InkwellModule); + fn postopt_ir(&self, function: &CompiledKind, module: &InkwellModule); + fn obj_memory_buffer(&self, function: &CompiledKind, memory_buffer: &InkwellMemoryBuffer); +} + +#[derive(Debug, Clone)] +pub struct LLVM { + pub(crate) enable_nan_canonicalization: bool, + pub(crate) enable_verifier: bool, + pub(crate) opt_level: LLVMOptLevel, + is_pic: bool, + pub(crate) callbacks: Option>, +} + +impl LLVM { + /// Creates a new configuration object with the default configuration + /// specified. 
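+    ///
+    /// A hedged usage sketch (illustrative only, not part of this diff):
+    ///
+    /// ```ignore
+    /// let mut config = LLVM::new();
+    /// config.opt_level(LLVMOptLevel::Aggressive);
+    /// let compiler = LLVMCompiler::new(config);
+    /// ```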
+ pub fn new() -> Self { + Self { + enable_nan_canonicalization: false, + enable_verifier: false, + opt_level: LLVMOptLevel::Aggressive, + is_pic: false, + callbacks: None, + } + } + + /// The optimization levels when optimizing the IR. + pub fn opt_level(&mut self, opt_level: LLVMOptLevel) -> &mut Self { + self.opt_level = opt_level; + self + } + + /// Callbacks that will triggered in the different compilation + /// phases in LLVM. + pub fn callbacks(&mut self, callbacks: Option>) -> &mut Self { + self.callbacks = callbacks; + self + } + + fn reloc_mode(&self) -> RelocMode { + if self.is_pic { + RelocMode::PIC + } else { + RelocMode::Static + } + } + + fn code_model(&self) -> CodeModel { + // We normally use the large code model, but when targeting shared + // objects, we are required to use PIC. If we use PIC anyways, we lose + // any benefit from large code model and there's some cost on all + // platforms, plus some platforms (MachO) don't support PIC + large + // at all. + if self.is_pic { + CodeModel::Small + } else { + CodeModel::Large + } + } + + fn target_triple(&self, target: &Target) -> TargetTriple { + // Hack: we're using is_pic to determine whether this is a native + // build or not. + let operating_system = if target.triple().operating_system + == wasmer_compiler::OperatingSystem::Darwin + && !self.is_pic + { + // LLVM detects static relocation + darwin + 64-bit and + // force-enables PIC because MachO doesn't support that + // combination. They don't check whether they're targeting + // MachO, they check whether the OS is set to Darwin. + // + // Since both linux and darwin use SysV ABI, this should work. + // but not in the case of Aarch64, there the ABI is slightly different + match target.triple().architecture { + _ => wasmer_compiler::OperatingSystem::Linux, + } + } else { + target.triple().operating_system + }; + let binary_format = if self.is_pic { + target.triple().binary_format + } else { + target_lexicon::BinaryFormat::Elf + }; + let triple = Triple { + architecture: target.triple().architecture, + vendor: target.triple().vendor.clone(), + operating_system, + environment: target.triple().environment, + binary_format, + }; + TargetTriple::create(&triple.to_string()) + } + + /// Generates the target machine for the current target + pub fn target_machine(&self, target: &Target) -> TargetMachine { + let triple = target.triple(); + let cpu_features = &target.cpu_features(); + + match triple.architecture { + Architecture::X86_64 | Architecture::X86_32(_) => { + InkwellTarget::initialize_x86(&InitializationConfig { + asm_parser: true, + asm_printer: true, + base: true, + disassembler: true, + info: true, + machine_code: true, + }) + } + Architecture::Aarch64(_) => InkwellTarget::initialize_aarch64(&InitializationConfig { + asm_parser: true, + asm_printer: true, + base: true, + disassembler: true, + info: true, + machine_code: true, + }), + // Architecture::Arm(_) => InkwellTarget::initialize_arm(&InitializationConfig { + // asm_parser: true, + // asm_printer: true, + // base: true, + // disassembler: true, + // info: true, + // machine_code: true, + // }), + _ => unimplemented!("target {} not yet supported in Wasmer", triple), + } + + // The CPU features formatted as LLVM strings + // We can safely map to gcc-like features as the CPUFeatures + // are compliant with the same string representations as gcc. 
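+        // For example, detected features ["sse2", "avx"] are rendered as the
+        // LLVM feature string "+sse2,+avx".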
+ let llvm_cpu_features = cpu_features + .iter() + .map(|feature| format!("+{}", feature.to_string())) + .join(","); + + let target_triple = self.target_triple(&target); + let llvm_target = InkwellTarget::from_triple(&target_triple).unwrap(); + llvm_target + .create_target_machine( + &target_triple, + "generic", + &llvm_cpu_features, + self.opt_level, + self.reloc_mode(), + self.code_model(), + ) + .unwrap() + } +} + +impl CompilerConfig for LLVM { + /// Emit code suitable for dlopen. + fn enable_pic(&mut self) { + // TODO: although we can emit PIC, the object file parser does not yet + // support all the relocations. + self.is_pic = true; + } + + /// Whether to verify compiler IR. + fn enable_verifier(&mut self) { + self.enable_verifier = true; + } + + fn enable_nan_canonicalization(&mut self) { + self.enable_nan_canonicalization = true; + } + + fn canonicalize_nans(&mut self, enable: bool) { + self.enable_nan_canonicalization = enable; + } + + /// Transform it into the compiler. + fn compiler(self: Box) -> Box { + Box::new(LLVMCompiler::new(*self)) + } +} + +impl Default for LLVM { + fn default() -> LLVM { + Self::new() + } +} diff --git a/lib/compiler-llvm/src/lib.rs b/lib/compiler-llvm/src/lib.rs new file mode 100644 index 0000000000..c1d8dc0acc --- /dev/null +++ b/lib/compiler-llvm/src/lib.rs @@ -0,0 +1,26 @@ +#![deny( + nonstandard_style, + unused_imports, + unused_mut, + unused_variables, + unused_unsafe, + unreachable_patterns +)] +#![cfg_attr( + all(not(target_os = "windows"), not(target_arch = "aarch64")), + deny(dead_code) +)] +#![doc(html_favicon_url = "https://wasmer.io/images/icons/favicon-32x32.png")] +#![doc(html_logo_url = "https://github.com/wasmerio.png?size=200")] + +mod abi; +mod compiler; +mod config; +mod object_file; +mod trampoline; +mod translator; + +pub use crate::compiler::LLVMCompiler; +pub use crate::config::{ + CompiledKind, InkwellMemoryBuffer, InkwellModule, LLVMCallbacks, LLVMOptLevel, LLVM, +}; diff --git a/lib/compiler-llvm/src/object_file.rs b/lib/compiler-llvm/src/object_file.rs new file mode 100644 index 0000000000..87b2186644 --- /dev/null +++ b/lib/compiler-llvm/src/object_file.rs @@ -0,0 +1,355 @@ +use object::{Object, ObjectSection, ObjectSymbol}; + +use std::collections::{HashMap, HashSet}; +use std::convert::TryInto; +use std::num::TryFromIntError; + +use wasmer_compiler::{ + CompileError, CompiledFunctionFrameInfo, CustomSection, CustomSectionProtection, + CustomSections, FunctionAddressMap, FunctionBody, InstructionAddressMap, Relocation, + RelocationKind, RelocationTarget, SectionBody, SectionIndex, SourceLoc, +}; +use wasmer_types::entity::{PrimaryMap, SecondaryMap}; +use wasmer_vm::libcalls::LibCall; + +fn map_tryfromint_err(error: TryFromIntError) -> CompileError { + CompileError::Codegen(format!("int doesn't fit: {}", error)) +} + +fn map_object_err(error: object::read::Error) -> CompileError { + CompileError::Codegen(format!("error parsing object file: {}", error)) +} + +pub struct CompiledFunction { + pub compiled_function: wasmer_compiler::CompiledFunction, + pub custom_sections: CustomSections, + pub eh_frame_section_indices: Vec, +} + +pub fn load_object_file( + contents: &[u8], + root_section: &str, + root_section_reloc_target: RelocationTarget, + mut symbol_name_to_relocation_target: F, +) -> Result +where + F: FnMut(&str) -> Result, CompileError>, +{ + // TODO: use perfect hash function? 
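+    // This table maps symbol names that LLVM may emit calls to (libm-style
+    // names as well as the wasmer_vm_* runtime helpers) onto their `LibCall`
+    // relocation targets.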
+    let mut libcalls = HashMap::new();
+    libcalls.insert("ceilf".to_string(), LibCall::CeilF32);
+    libcalls.insert("ceil".to_string(), LibCall::CeilF64);
+    libcalls.insert("floorf".to_string(), LibCall::FloorF32);
+    libcalls.insert("floor".to_string(), LibCall::FloorF64);
+    libcalls.insert("nearbyintf".to_string(), LibCall::NearestF32);
+    libcalls.insert("nearbyint".to_string(), LibCall::NearestF64);
+    libcalls.insert("truncf".to_string(), LibCall::TruncF32);
+    libcalls.insert("trunc".to_string(), LibCall::TruncF64);
+    libcalls.insert("wasmer_vm_f32_ceil".to_string(), LibCall::CeilF32);
+    libcalls.insert("wasmer_vm_f64_ceil".to_string(), LibCall::CeilF64);
+    libcalls.insert("wasmer_vm_f32_floor".to_string(), LibCall::FloorF32);
+    libcalls.insert("wasmer_vm_f64_floor".to_string(), LibCall::FloorF64);
+    libcalls.insert("wasmer_vm_f32_nearest".to_string(), LibCall::NearestF32);
+    libcalls.insert("wasmer_vm_f64_nearest".to_string(), LibCall::NearestF64);
+    libcalls.insert("wasmer_vm_f32_trunc".to_string(), LibCall::TruncF32);
+    libcalls.insert("wasmer_vm_f64_trunc".to_string(), LibCall::TruncF64);
+    libcalls.insert("wasmer_vm_memory32_size".to_string(), LibCall::Memory32Size);
+    libcalls.insert(
+        "wasmer_vm_imported_memory32_size".to_string(),
+        LibCall::ImportedMemory32Size,
+    );
+    libcalls.insert("wasmer_vm_table_copy".to_string(), LibCall::TableCopy);
+    libcalls.insert("wasmer_vm_table_init".to_string(), LibCall::TableInit);
+    libcalls.insert("wasmer_vm_table_fill".to_string(), LibCall::TableFill);
+    libcalls.insert("wasmer_vm_table_size".to_string(), LibCall::TableSize);
+    libcalls.insert(
+        "wasmer_vm_imported_table_size".to_string(),
+        LibCall::ImportedTableSize,
+    );
+    libcalls.insert("wasmer_vm_table_get".to_string(), LibCall::TableGet);
+    libcalls.insert(
+        "wasmer_vm_imported_table_get".to_string(),
+        LibCall::ImportedTableGet,
+    );
+    libcalls.insert("wasmer_vm_table_set".to_string(), LibCall::TableSet);
+    libcalls.insert(
+        "wasmer_vm_imported_table_set".to_string(),
+        LibCall::ImportedTableSet,
+    );
+    libcalls.insert("wasmer_vm_table_grow".to_string(), LibCall::TableGrow);
+    libcalls.insert(
+        "wasmer_vm_imported_table_grow".to_string(),
+        LibCall::ImportedTableGrow,
+    );
+    libcalls.insert("wasmer_vm_func_ref".to_string(), LibCall::FuncRef);
+    libcalls.insert("wasmer_vm_elem_drop".to_string(), LibCall::ElemDrop);
+    libcalls.insert("wasmer_vm_memory32_copy".to_string(), LibCall::Memory32Copy);
+    libcalls.insert(
+        "wasmer_vm_imported_memory32_copy".to_string(),
+        LibCall::ImportedMemory32Copy,
+    );
+    libcalls.insert("wasmer_vm_memory32_fill".to_string(), LibCall::Memory32Fill);
+    libcalls.insert(
+        "wasmer_vm_imported_memory32_fill".to_string(),
+        LibCall::ImportedMemory32Fill,
+    );
+    libcalls.insert("wasmer_vm_memory32_init".to_string(), LibCall::Memory32Init);
+    libcalls.insert("wasmer_vm_data_drop".to_string(), LibCall::DataDrop);
+    libcalls.insert("wasmer_vm_raise_trap".to_string(), LibCall::RaiseTrap);
+    libcalls.insert("wasmer_vm_probestack".to_string(), LibCall::Probestack);
+
+    let elf = object::File::parse(contents).map_err(map_object_err)?;
+
+    let mut visited: HashSet<object::read::SectionIndex> = HashSet::new();
+    let mut worklist: Vec<object::read::SectionIndex> = Vec::new();
+    let mut section_targets: HashMap<object::read::SectionIndex, RelocationTarget> =
+        HashMap::new();
+
+    let root_section_index = elf
+        .section_by_name(root_section)
+        .ok_or_else(|| CompileError::Codegen(format!("no section named {}", root_section)))?
+        .index();
+
+    let mut section_to_custom_section = HashMap::new();
+
+    section_targets.insert(root_section_index, root_section_reloc_target);
+
+    let mut next_custom_section: u32 = 0;
+    let mut elf_section_to_target = |elf_section_index: object::read::SectionIndex| {
+        *section_targets.entry(elf_section_index).or_insert_with(|| {
+            let next = SectionIndex::from_u32(next_custom_section);
+            section_to_custom_section.insert(elf_section_index, next);
+            let target = RelocationTarget::CustomSection(next);
+            next_custom_section += 1;
+            target
+        })
+    };
+
+    // From elf section index to list of Relocations. Although we use a Vec,
+    // the order of relocations is not important.
+    let mut relocations: HashMap<object::read::SectionIndex, Vec<Relocation>> = HashMap::new();
+
+    // Each iteration of this loop pulls a section and the relocations that
+    // apply to it. We begin with the ".root_section" section, and then parse
+    // all relocation sections that apply to that section. Those relocations
+    // may refer to additional sections which we then add to the worklist
+    // until we've visited the closure of everything needed to run the code
+    // in ".root_section".
+    //
+    // `worklist` is the list of sections we have yet to visit. It never
+    // contains any duplicates or sections we've already visited. `visited`
+    // contains all the sections we've ever added to the worklist in a set
+    // so that we can quickly check whether a section is new before adding
+    // it to the worklist. `section_to_custom_section` is filled in with all
+    // the sections we want to include.
+    worklist.push(root_section_index);
+    visited.insert(root_section_index);
+
+    // Also add any .eh_frame sections.
+    let mut eh_frame_section_indices = vec![];
+    for section in elf.sections() {
+        if section.kind() == object::SectionKind::Elf(object::elf::SHT_X86_64_UNWIND) {
+            let index = section.index();
+            worklist.push(index);
+            visited.insert(index);
+            eh_frame_section_indices.push(index);
+            // This allocates a custom section index for the ELF section.
+            elf_section_to_target(index);
+        }
+    }
+
+    while let Some(section_index) = worklist.pop() {
+        for (offset, reloc) in elf
+            .section_by_index(section_index)
+            .map_err(map_object_err)?
+ .relocations() + { + let kind = match (reloc.kind(), reloc.size()) { + (object::RelocationKind::Absolute, 64) => RelocationKind::Abs8, + (object::RelocationKind::Elf(object::elf::R_X86_64_PC64), 0) => { + RelocationKind::X86PCRel8 + } + (object::RelocationKind::Elf(object::elf::R_AARCH64_MOVW_UABS_G0_NC), 0) => { + RelocationKind::Arm64Movw0 + } + (object::RelocationKind::Elf(object::elf::R_AARCH64_MOVW_UABS_G1_NC), 0) => { + RelocationKind::Arm64Movw1 + } + (object::RelocationKind::Elf(object::elf::R_AARCH64_MOVW_UABS_G2_NC), 0) => { + RelocationKind::Arm64Movw2 + } + (object::RelocationKind::Elf(object::elf::R_AARCH64_MOVW_UABS_G3), 0) => { + RelocationKind::Arm64Movw3 + } + (object::RelocationKind::PltRelative, 26) => RelocationKind::Arm64Call, + _ => { + return Err(CompileError::Codegen(format!( + "unknown relocation {:?}", + reloc + ))); + } + }; + let addend = reloc.addend(); + let target = match reloc.target() { + object::read::RelocationTarget::Symbol(index) => { + let symbol = elf.symbol_by_index(index).map_err(map_object_err)?; + let symbol_name = symbol.name().map_err(map_object_err)?; + if symbol.kind() == object::SymbolKind::Section { + match symbol.section() { + object::SymbolSection::Section(section_index) => { + if section_index == root_section_index { + root_section_reloc_target + } else { + if visited.insert(section_index) { + worklist.push(section_index); + } + elf_section_to_target(section_index) + } + } + _ => { + return Err(CompileError::Codegen(format!( + "relocation targets unknown section {:?}", + reloc + ))); + } + } + // Maybe a libcall then? + } else if let Some(libcall) = libcalls.get(symbol_name) { + RelocationTarget::LibCall(*libcall) + } else if let Some(reloc_target) = + symbol_name_to_relocation_target(symbol_name)? + { + reloc_target + } else { + return Err(CompileError::Codegen(format!( + "relocation targets unknown symbol {:?}", + reloc + ))); + } + } + + object::read::RelocationTarget::Section(index) => { + if index == root_section_index { + root_section_reloc_target + } else { + if visited.insert(index) { + worklist.push(index); + } + elf_section_to_target(index) + } + } + + object::read::RelocationTarget::Absolute => { + // Wasm-produced object files should never have absolute + // addresses in them because none of the parts of the Wasm + // VM, nor the generated code are loaded at fixed addresses. + return Err(CompileError::Codegen(format!( + "relocation targets absolute address {:?}", + reloc + ))); + } + + // `object::read::RelocationTarget` is a + // non-exhaustive enum (`#[non_exhaustive]`), so it + // could have additional variants added in the + // future. Therefore, when matching against variants + // of non-exhaustive enums, an extra wildcard arm must + // be added to account for any future variants. 
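+                // (Illustrative) without the wildcard arm below, a new
+                // upstream variant would make this `match` non-exhaustive
+                // and stop the crate from compiling, even though all we can
+                // do for an unknown relocation target is report an error.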
+ t => { + return Err(CompileError::Codegen(format!( + "relocation target is unknown `{:?}`", + t + ))); + } + }; + relocations + .entry(section_index) + .or_default() + .push(Relocation { + kind, + reloc_target: target, + offset: offset.try_into().map_err(map_tryfromint_err)?, + addend, + }); + } + } + + let eh_frame_section_indices = eh_frame_section_indices + .iter() + .map(|index| { + section_to_custom_section.get(index).map_or_else( + || { + Err(CompileError::Codegen(format!( + ".eh_frame section with index={:?} was never loaded", + index + ))) + }, + |idx| Ok(*idx), + ) + }) + .collect::, _>>()?; + + let mut custom_sections = section_to_custom_section + .iter() + .map(|(elf_section_index, custom_section_index)| { + ( + custom_section_index, + CustomSection { + protection: CustomSectionProtection::Read, + bytes: SectionBody::new_with_vec( + elf.section_by_index(*elf_section_index) + .unwrap() + .data() + .unwrap() + .to_vec(), + ), + relocations: relocations + .remove_entry(elf_section_index) + .map_or(vec![], |(_, v)| v), + }, + ) + }) + .collect::>(); + custom_sections.sort_unstable_by_key(|a| a.0); + let custom_sections = custom_sections + .into_iter() + .map(|(_, v)| v) + .collect::>(); + + let function_body = FunctionBody { + body: elf + .section_by_index(root_section_index) + .unwrap() + .data() + .unwrap() + .to_vec(), + unwind_info: None, + }; + + let address_map = FunctionAddressMap { + instructions: vec![InstructionAddressMap { + srcloc: SourceLoc::default(), + code_offset: 0, + code_len: function_body.body.len(), + }], + start_srcloc: SourceLoc::default(), + end_srcloc: SourceLoc::default(), + body_offset: 0, + body_len: function_body.body.len(), + }; + + Ok(CompiledFunction { + compiled_function: wasmer_compiler::CompiledFunction { + body: function_body, + jt_offsets: SecondaryMap::new(), + relocations: relocations + .remove_entry(&root_section_index) + .map_or(vec![], |(_, v)| v), + frame_info: CompiledFunctionFrameInfo { + address_map, + traps: vec![], + }, + }, + custom_sections, + eh_frame_section_indices, + }) +} diff --git a/lib/compiler-llvm/src/trampoline/mod.rs b/lib/compiler-llvm/src/trampoline/mod.rs new file mode 100644 index 0000000000..9adc4220e0 --- /dev/null +++ b/lib/compiler-llvm/src/trampoline/mod.rs @@ -0,0 +1,3 @@ +mod wasm; + +pub use self::wasm::FuncTrampoline; diff --git a/lib/compiler-llvm/src/trampoline/wasm.rs b/lib/compiler-llvm/src/trampoline/wasm.rs new file mode 100644 index 0000000000..81557c98fb --- /dev/null +++ b/lib/compiler-llvm/src/trampoline/wasm.rs @@ -0,0 +1,510 @@ +use crate::abi::{get_abi, Abi}; +use crate::config::{CompiledKind, LLVM}; +use crate::object_file::{load_object_file, CompiledFunction}; +use crate::translator::intrinsics::{type_to_llvm, type_to_llvm_ptr, Intrinsics}; +use inkwell::values::BasicMetadataValueEnum; +use inkwell::{ + attributes::{Attribute, AttributeLoc}, + context::Context, + module::{Linkage, Module}, + passes::PassManager, + targets::{FileType, TargetMachine}, + types::BasicType, + values::FunctionValue, + AddressSpace, DLLStorageClass, +}; +use std::cmp; +use std::convert::TryFrom; +use std::convert::TryInto; +use wasmer_compiler::{CompileError, FunctionBody, RelocationTarget}; +use wasmer_types::{FunctionType, LocalFunctionIndex}; + +pub struct FuncTrampoline { + ctx: Context, + target_machine: TargetMachine, + abi: Box, +} + +const FUNCTION_SECTION: &str = "__TEXT,wasmer_trmpl"; // Needs to be between 1 and 16 chars + +impl FuncTrampoline { + pub fn new(target_machine: TargetMachine) -> Self { + let 
abi = get_abi(&target_machine); + Self { + ctx: Context::create(), + target_machine, + abi, + } + } + + pub fn trampoline_to_module( + &self, + ty: &FunctionType, + config: &LLVM, + name: &str, + ) -> Result { + // The function type, used for the callbacks. + let function = CompiledKind::FunctionCallTrampoline(ty.clone()); + let module = self.ctx.create_module(""); + let target_machine = &self.target_machine; + let target_triple = target_machine.get_triple(); + let target_data = target_machine.get_target_data(); + module.set_triple(&target_triple); + module.set_data_layout(&target_data.get_data_layout()); + let intrinsics = Intrinsics::declare(&module, &self.ctx, &target_data); + + let (callee_ty, callee_attrs) = + self.abi + .func_type_to_llvm(&self.ctx, &intrinsics, None, ty)?; + let trampoline_ty = intrinsics.void_ty.fn_type( + &[ + intrinsics.ctx_ptr_ty.into(), // vmctx ptr + callee_ty.ptr_type(AddressSpace::Generic).into(), // callee function address + intrinsics.i128_ptr_ty.into(), // in/out values ptr + ], + false, + ); + + let trampoline_func = module.add_function(name, trampoline_ty, Some(Linkage::External)); + trampoline_func + .as_global_value() + .set_section(FUNCTION_SECTION); + trampoline_func + .as_global_value() + .set_linkage(Linkage::DLLExport); + trampoline_func + .as_global_value() + .set_dll_storage_class(DLLStorageClass::Export); + self.generate_trampoline(trampoline_func, ty, &callee_attrs, &self.ctx, &intrinsics)?; + + if let Some(ref callbacks) = config.callbacks { + callbacks.preopt_ir(&function, &module); + } + + let pass_manager = PassManager::create(()); + + if config.enable_verifier { + pass_manager.add_verifier_pass(); + } + + pass_manager.add_early_cse_pass(); + + pass_manager.run_on(&module); + + if let Some(ref callbacks) = config.callbacks { + callbacks.postopt_ir(&function, &module); + } + + Ok(module) + } + + pub fn trampoline( + &self, + ty: &FunctionType, + config: &LLVM, + name: &str, + ) -> Result { + let module = self.trampoline_to_module(ty, config, name)?; + let function = CompiledKind::FunctionCallTrampoline(ty.clone()); + let target_machine = &self.target_machine; + + let memory_buffer = target_machine + .write_to_memory_buffer(&module, FileType::Object) + .unwrap(); + + if let Some(ref callbacks) = config.callbacks { + callbacks.obj_memory_buffer(&function, &memory_buffer); + } + + let mem_buf_slice = memory_buffer.as_slice(); + let CompiledFunction { + compiled_function, + custom_sections, + eh_frame_section_indices, + } = load_object_file( + mem_buf_slice, + FUNCTION_SECTION, + RelocationTarget::LocalFunc(LocalFunctionIndex::from_u32(0)), + |name: &str| { + Err(CompileError::Codegen(format!( + "trampoline generation produced reference to unknown function {}", + name + ))) + }, + )?; + let mut all_sections_are_eh_sections = true; + if eh_frame_section_indices.len() != custom_sections.len() { + all_sections_are_eh_sections = false; + } else { + let mut eh_frame_section_indices = eh_frame_section_indices; + eh_frame_section_indices.sort_unstable(); + for (idx, section_idx) in eh_frame_section_indices.iter().enumerate() { + if idx as u32 != section_idx.as_u32() { + all_sections_are_eh_sections = false; + break; + } + } + } + if !all_sections_are_eh_sections { + return Err(CompileError::Codegen( + "trampoline generation produced non-eh custom sections".into(), + )); + } + if !compiled_function.relocations.is_empty() { + return Err(CompileError::Codegen( + "trampoline generation produced relocations".into(), + )); + } + if 
!compiled_function.jt_offsets.is_empty() { + return Err(CompileError::Codegen( + "trampoline generation produced jump tables".into(), + )); + } + // Ignore CompiledFunctionFrameInfo. Extra frame info isn't a problem. + + Ok(FunctionBody { + body: compiled_function.body.body, + unwind_info: compiled_function.body.unwind_info, + }) + } + + pub fn dynamic_trampoline_to_module( + &self, + ty: &FunctionType, + config: &LLVM, + name: &str, + ) -> Result { + // The function type, used for the callbacks + let function = CompiledKind::DynamicFunctionTrampoline(ty.clone()); + let module = self.ctx.create_module(""); + let target_machine = &self.target_machine; + let target_data = target_machine.get_target_data(); + let target_triple = target_machine.get_triple(); + module.set_triple(&target_triple); + module.set_data_layout(&target_data.get_data_layout()); + let intrinsics = Intrinsics::declare(&module, &self.ctx, &target_data); + + let (trampoline_ty, trampoline_attrs) = + self.abi + .func_type_to_llvm(&self.ctx, &intrinsics, None, ty)?; + let trampoline_func = module.add_function(name, trampoline_ty, Some(Linkage::External)); + for (attr, attr_loc) in trampoline_attrs { + trampoline_func.add_attribute(attr_loc, attr); + } + trampoline_func + .as_global_value() + .set_section(FUNCTION_SECTION); + trampoline_func + .as_global_value() + .set_linkage(Linkage::DLLExport); + trampoline_func + .as_global_value() + .set_dll_storage_class(DLLStorageClass::Export); + self.generate_dynamic_trampoline(trampoline_func, ty, &self.ctx, &intrinsics)?; + + if let Some(ref callbacks) = config.callbacks { + callbacks.preopt_ir(&function, &module); + } + + let pass_manager = PassManager::create(()); + + if config.enable_verifier { + pass_manager.add_verifier_pass(); + } + + pass_manager.add_early_cse_pass(); + + pass_manager.run_on(&module); + + if let Some(ref callbacks) = config.callbacks { + callbacks.postopt_ir(&function, &module); + } + + Ok(module) + } + pub fn dynamic_trampoline( + &self, + ty: &FunctionType, + config: &LLVM, + name: &str, + ) -> Result { + let function = CompiledKind::DynamicFunctionTrampoline(ty.clone()); + let target_machine = &self.target_machine; + + let module = self.dynamic_trampoline_to_module(ty, config, name)?; + + let memory_buffer = target_machine + .write_to_memory_buffer(&module, FileType::Object) + .unwrap(); + + if let Some(ref callbacks) = config.callbacks { + callbacks.obj_memory_buffer(&function, &memory_buffer); + } + + let mem_buf_slice = memory_buffer.as_slice(); + let CompiledFunction { + compiled_function, + custom_sections, + eh_frame_section_indices, + } = load_object_file( + mem_buf_slice, + FUNCTION_SECTION, + RelocationTarget::LocalFunc(LocalFunctionIndex::from_u32(0)), + |name: &str| { + Err(CompileError::Codegen(format!( + "trampoline generation produced reference to unknown function {}", + name + ))) + }, + )?; + let mut all_sections_are_eh_sections = true; + if eh_frame_section_indices.len() != custom_sections.len() { + all_sections_are_eh_sections = false; + } else { + let mut eh_frame_section_indices = eh_frame_section_indices; + eh_frame_section_indices.sort_unstable(); + for (idx, section_idx) in eh_frame_section_indices.iter().enumerate() { + if idx as u32 != section_idx.as_u32() { + all_sections_are_eh_sections = false; + break; + } + } + } + if !all_sections_are_eh_sections { + return Err(CompileError::Codegen( + "trampoline generation produced non-eh custom sections".into(), + )); + } + if !compiled_function.relocations.is_empty() { + return 
Err(CompileError::Codegen( + "trampoline generation produced relocations".into(), + )); + } + if !compiled_function.jt_offsets.is_empty() { + return Err(CompileError::Codegen( + "trampoline generation produced jump tables".into(), + )); + } + // Ignore CompiledFunctionFrameInfo. Extra frame info isn't a problem. + + Ok(FunctionBody { + body: compiled_function.body.body, + unwind_info: compiled_function.body.unwind_info, + }) + } + + fn generate_trampoline<'ctx>( + &self, + trampoline_func: FunctionValue, + func_sig: &FunctionType, + func_attrs: &[(Attribute, AttributeLoc)], + context: &'ctx Context, + intrinsics: &Intrinsics<'ctx>, + ) -> Result<(), CompileError> { + let entry_block = context.append_basic_block(trampoline_func, "entry"); + let builder = context.create_builder(); + builder.position_at_end(entry_block); + + let (callee_vmctx_ptr, func_ptr, args_rets_ptr) = + match *trampoline_func.get_params().as_slice() { + [callee_vmctx_ptr, func_ptr, args_rets_ptr] => ( + callee_vmctx_ptr, + func_ptr.into_pointer_value(), + args_rets_ptr.into_pointer_value(), + ), + _ => { + return Err(CompileError::Codegen( + "trampoline function unimplemented".to_string(), + )) + } + }; + + let mut args_vec: Vec = + Vec::with_capacity(func_sig.params().len() + 1); + + if self.abi.is_sret(func_sig)? { + let basic_types: Vec<_> = func_sig + .results() + .iter() + .map(|&ty| type_to_llvm(intrinsics, ty)) + .collect::>()?; + + let sret_ty = context.struct_type(&basic_types, false); + args_vec.push(builder.build_alloca(sret_ty, "sret").into()); + } + + args_vec.push(callee_vmctx_ptr.into()); + + for (i, param_ty) in func_sig.params().iter().enumerate() { + let index = intrinsics.i32_ty.const_int(i as _, false); + let item_pointer = + unsafe { builder.build_in_bounds_gep(args_rets_ptr, &[index], "arg_ptr") }; + + let casted_pointer_type = type_to_llvm_ptr(intrinsics, *param_ty)?; + + let typed_item_pointer = + builder.build_pointer_cast(item_pointer, casted_pointer_type, "typed_arg_pointer"); + + let arg = builder.build_load(typed_item_pointer, "arg"); + args_vec.push(arg.into()); + } + + let callable_func = inkwell::values::CallableValue::try_from(func_ptr).unwrap(); + let call_site = builder.build_call(callable_func, args_vec.as_slice().into(), "call"); + for (attr, attr_loc) in func_attrs { + call_site.add_attribute(*attr_loc, *attr); + } + + let rets = self + .abi + .rets_from_call(&builder, intrinsics, call_site, func_sig); + let mut idx = 0; + rets.iter().for_each(|v| { + let ptr = unsafe { + builder.build_gep( + args_rets_ptr, + &[intrinsics.i32_ty.const_int(idx, false)], + "", + ) + }; + let ptr = + builder.build_pointer_cast(ptr, v.get_type().ptr_type(AddressSpace::Generic), ""); + builder.build_store(ptr, *v); + if v.get_type() == intrinsics.i128_ty.as_basic_type_enum() { + idx += 1; + } + idx += 1; + }); + + builder.build_return(None); + Ok(()) + } + + fn generate_dynamic_trampoline<'ctx>( + &self, + trampoline_func: FunctionValue, + func_sig: &FunctionType, + context: &'ctx Context, + intrinsics: &Intrinsics<'ctx>, + ) -> Result<(), CompileError> { + let entry_block = context.append_basic_block(trampoline_func, "entry"); + let builder = context.create_builder(); + builder.position_at_end(entry_block); + + // Allocate stack space for the params and results. + let values = builder.build_alloca( + intrinsics.i128_ty.array_type(cmp::max( + func_sig.params().len().try_into().unwrap(), + func_sig.results().len().try_into().unwrap(), + )), + "", + ); + + // Copy params to 'values'. 
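+        // (Illustrative) `values` is a single i128 array reused in both
+        // directions: params are stored at indices 0..nparams before the
+        // call, then the callee overwrites indices 0..nresults with the
+        // results. Sizing it with max(nparams, nresults) above keeps both
+        // passes in bounds.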
+ let first_user_param = if self.abi.is_sret(func_sig)? { 2 } else { 1 }; + for i in 0..func_sig.params().len() { + let ptr = unsafe { + builder.build_in_bounds_gep( + values, + &[ + intrinsics.i32_zero, + intrinsics.i32_ty.const_int(i.try_into().unwrap(), false), + ], + "", + ) + }; + let ptr = builder + .build_bitcast(ptr, type_to_llvm_ptr(intrinsics, func_sig.params()[i])?, "") + .into_pointer_value(); + builder.build_store( + ptr, + trampoline_func + .get_nth_param(i as u32 + first_user_param) + .unwrap(), + ); + } + + let callee_ty = intrinsics + .void_ty + .fn_type( + &[ + intrinsics.ctx_ptr_ty.into(), // vmctx ptr + intrinsics.i128_ptr_ty.into(), // in/out values ptr + ], + false, + ) + .ptr_type(AddressSpace::Generic); + let vmctx = self.abi.get_vmctx_ptr_param(&trampoline_func); + let callee = builder + .build_load( + builder + .build_bitcast(vmctx, callee_ty.ptr_type(AddressSpace::Generic), "") + .into_pointer_value(), + "", + ) + .into_pointer_value(); + + let values_ptr = builder.build_pointer_cast(values, intrinsics.i128_ptr_ty, ""); + let callable_func = inkwell::values::CallableValue::try_from(callee).unwrap(); + builder.build_call(callable_func, &[vmctx.into(), values_ptr.into()], ""); + + if func_sig.results().is_empty() { + builder.build_return(None); + } else { + let results = func_sig + .results() + .iter() + .enumerate() + .map(|(idx, ty)| { + let ptr = unsafe { + builder.build_gep( + values, + &[ + intrinsics.i32_ty.const_zero(), + intrinsics.i32_ty.const_int(idx.try_into().unwrap(), false), + ], + "", + ) + }; + let ptr = + builder.build_pointer_cast(ptr, type_to_llvm_ptr(intrinsics, *ty)?, ""); + Ok(builder.build_load(ptr, "")) + }) + .collect::, CompileError>>()?; + + if self.abi.is_sret(func_sig)? { + let sret = trampoline_func + .get_first_param() + .unwrap() + .into_pointer_value(); + let mut struct_value = sret + .get_type() + .get_element_type() + .into_struct_type() + .get_undef(); + for (idx, value) in results.iter().enumerate() { + let value = builder.build_bitcast( + *value, + type_to_llvm(&intrinsics, func_sig.results()[idx])?, + "", + ); + struct_value = builder + .build_insert_value(struct_value, value, idx as u32, "") + .unwrap() + .into_struct_value(); + } + builder.build_store(sret, struct_value); + builder.build_return(None); + } else { + builder.build_return(Some(&self.abi.pack_values_for_register_return( + &intrinsics, + &builder, + &results.as_slice(), + &trampoline_func.get_type(), + )?)); + } + } + + Ok(()) + } +} diff --git a/lib/compiler-llvm/src/translator/code.rs b/lib/compiler-llvm/src/translator/code.rs new file mode 100644 index 0000000000..ab63f84d29 --- /dev/null +++ b/lib/compiler-llvm/src/translator/code.rs @@ -0,0 +1,11290 @@ +use super::{ + intrinsics::{ + tbaa_label, type_to_llvm, CtxType, FunctionCache, GlobalCache, Intrinsics, MemoryCache, + }, + // stackmap::{StackmapEntry, StackmapEntryKind, StackmapRegistry, ValueSemantic}, + state::{ControlFrame, ExtraInfo, IfElseState, State}, +}; +use inkwell::{ + attributes::AttributeLoc, + builder::Builder, + context::Context, + module::{Linkage, Module}, + passes::PassManager, + targets::{FileType, TargetMachine}, + types::{BasicType, FloatMathType, IntType, PointerType, VectorType}, + values::{ + BasicMetadataValueEnum, BasicValue, BasicValueEnum, FloatValue, FunctionValue, + InstructionOpcode, InstructionValue, IntValue, PhiValue, PointerValue, VectorValue, + }, + AddressSpace, AtomicOrdering, AtomicRMWBinOp, DLLStorageClass, FloatPredicate, IntPredicate, +}; +use smallvec::SmallVec; 
+ +use crate::abi::{get_abi, Abi}; +use crate::config::{CompiledKind, LLVM}; +use crate::object_file::{load_object_file, CompiledFunction}; +use std::convert::TryFrom; +use wasmer_compiler::wasmparser::{MemoryImmediate, Operator}; +use wasmer_compiler::{ + wptype_to_type, CompileError, FunctionBodyData, ModuleTranslationState, RelocationTarget, + Symbol, SymbolRegistry, +}; +use wasmer_types::entity::PrimaryMap; +use wasmer_types::{ + FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, ModuleInfo, + SignatureIndex, TableIndex, Type, +}; +use wasmer_vm::{MemoryStyle, TableStyle, VMOffsets}; + +const FUNCTION_SECTION: &str = "__TEXT,wasmer_function"; + +fn to_compile_error(err: impl std::error::Error) -> CompileError { + CompileError::Codegen(format!("{}", err)) +} + +pub struct FuncTranslator { + ctx: Context, + target_machine: TargetMachine, + abi: Box, +} + +impl FuncTranslator { + pub fn new(target_machine: TargetMachine) -> Self { + let abi = get_abi(&target_machine); + Self { + ctx: Context::create(), + target_machine, + abi, + } + } + + pub fn translate_to_module( + &self, + wasm_module: &ModuleInfo, + module_translation: &ModuleTranslationState, + local_func_index: &LocalFunctionIndex, + function_body: &FunctionBodyData, + config: &LLVM, + memory_styles: &PrimaryMap, + _table_styles: &PrimaryMap, + symbol_registry: &dyn SymbolRegistry, + ) -> Result { + // The function type, used for the callbacks. + let function = CompiledKind::Local(*local_func_index); + let func_index = wasm_module.func_index(*local_func_index); + let function_name = + symbol_registry.symbol_to_name(Symbol::LocalFunction(*local_func_index)); + let module_name = match wasm_module.name.as_ref() { + None => format!(" function {}", function_name), + Some(module_name) => format!("module {} function {}", module_name, function_name), + }; + let module = self.ctx.create_module(module_name.as_str()); + + let target_machine = &self.target_machine; + let target_triple = target_machine.get_triple(); + let target_data = target_machine.get_target_data(); + module.set_triple(&target_triple); + module.set_data_layout(&target_data.get_data_layout()); + let wasm_fn_type = wasm_module + .signatures + .get(wasm_module.functions[func_index]) + .unwrap(); + + // TODO: pointer width + let offsets = VMOffsets::new(target_data.get_pointer_byte_size(None) as u8) + .with_module_info(&wasm_module); + let intrinsics = Intrinsics::declare(&module, &self.ctx, &target_data); + let (func_type, func_attrs) = + self.abi + .func_type_to_llvm(&self.ctx, &intrinsics, Some(&offsets), wasm_fn_type)?; + + let func = module.add_function(&function_name, func_type, Some(Linkage::External)); + for (attr, attr_loc) in &func_attrs { + func.add_attribute(*attr_loc, *attr); + } + + func.add_attribute(AttributeLoc::Function, intrinsics.stack_probe); + func.set_personality_function(intrinsics.personality); + func.as_global_value().set_section(FUNCTION_SECTION); + func.set_linkage(Linkage::DLLExport); + func.as_global_value() + .set_dll_storage_class(DLLStorageClass::Export); + + let entry = self.ctx.append_basic_block(func, "entry"); + let start_of_code = self.ctx.append_basic_block(func, "start_of_code"); + let return_ = self.ctx.append_basic_block(func, "return"); + let alloca_builder = self.ctx.create_builder(); + let cache_builder = self.ctx.create_builder(); + let builder = self.ctx.create_builder(); + cache_builder.position_at_end(entry); + let br = cache_builder.build_unconditional_branch(start_of_code); + 
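// (Illustrative) three builders share the entry block: allocas come
+        // first, cached stores of params and locals come next, and the
+        // branch to start_of_code stays last. Positioning both builders
+        // before `br` preserves that layout as new instructions are added.
+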
alloca_builder.position_before(&br); + cache_builder.position_before(&br); + builder.position_at_end(start_of_code); + + let mut state = State::new(); + builder.position_at_end(return_); + let phis: SmallVec<[PhiValue; 1]> = wasm_fn_type + .results() + .iter() + .map(|&wasm_ty| type_to_llvm(&intrinsics, wasm_ty).map(|ty| builder.build_phi(ty, ""))) + .collect::>()?; + state.push_block(return_, phis); + builder.position_at_end(start_of_code); + + let reader = + wasmer_compiler::FunctionReader::new(function_body.module_offset, function_body.data); + + let mut params = vec![]; + let first_param = + if func_type.get_return_type().is_none() && wasm_fn_type.results().len() > 1 { + 2 + } else { + 1 + }; + let mut is_first_alloca = true; + let mut insert_alloca = |ty, name| { + let alloca = alloca_builder.build_alloca(ty, name); + if is_first_alloca { + alloca_builder.position_at(entry, &alloca.as_instruction_value().unwrap()); + is_first_alloca = false; + } + alloca + }; + + for idx in 0..wasm_fn_type.params().len() { + let ty = wasm_fn_type.params()[idx]; + let ty = type_to_llvm(&intrinsics, ty)?; + let value = func + .get_nth_param((idx as u32).checked_add(first_param).unwrap()) + .unwrap(); + let alloca = insert_alloca(ty, "param"); + cache_builder.build_store(alloca, value); + params.push(alloca); + } + + let mut local_reader = reader.get_locals_reader()?; + let mut locals = vec![]; + let num_locals = local_reader.get_count(); + for _ in 0..num_locals { + let (count, ty) = local_reader.read()?; + let ty = wptype_to_type(ty).map_err(to_compile_error)?; + let ty = type_to_llvm(&intrinsics, ty)?; + for _ in 0..count { + let alloca = insert_alloca(ty, "local"); + cache_builder.build_store(alloca, ty.const_zero()); + locals.push(alloca); + } + } + + let mut params_locals = params.clone(); + params_locals.extend(locals.iter().cloned()); + + let mut fcg = LLVMFunctionCodeGenerator { + context: &self.ctx, + builder, + alloca_builder, + intrinsics: &intrinsics, + state, + function: func, + locals: params_locals, + ctx: CtxType::new(wasm_module, &func, &cache_builder, &*self.abi), + unreachable_depth: 0, + memory_styles, + _table_styles, + module: &module, + module_translation, + wasm_module, + symbol_registry, + abi: &*self.abi, + config, + }; + fcg.ctx.add_func( + func_index, + func.as_global_value().as_pointer_value(), + fcg.ctx.basic(), + &func_attrs, + ); + + let mut operator_reader = reader.get_operators_reader()?.into_iter_with_offsets(); + while fcg.state.has_control_frames() { + let (op, pos) = operator_reader.next().unwrap()?; + fcg.translate_operator(op, pos as u32)?; + } + + fcg.finalize(wasm_fn_type)?; + + if let Some(ref callbacks) = config.callbacks { + callbacks.preopt_ir(&function, &module); + } + + let pass_manager = PassManager::create(()); + + if config.enable_verifier { + pass_manager.add_verifier_pass(); + } + + pass_manager.add_type_based_alias_analysis_pass(); + pass_manager.add_sccp_pass(); + pass_manager.add_prune_eh_pass(); + pass_manager.add_dead_arg_elimination_pass(); + pass_manager.add_lower_expect_intrinsic_pass(); + pass_manager.add_scalar_repl_aggregates_pass(); + pass_manager.add_instruction_combining_pass(); + pass_manager.add_jump_threading_pass(); + pass_manager.add_correlated_value_propagation_pass(); + pass_manager.add_cfg_simplification_pass(); + pass_manager.add_reassociate_pass(); + pass_manager.add_loop_rotate_pass(); + pass_manager.add_loop_unswitch_pass(); + pass_manager.add_ind_var_simplify_pass(); + pass_manager.add_licm_pass(); + 
pass_manager.add_loop_vectorize_pass(); + pass_manager.add_instruction_combining_pass(); + pass_manager.add_sccp_pass(); + pass_manager.add_reassociate_pass(); + pass_manager.add_cfg_simplification_pass(); + pass_manager.add_gvn_pass(); + pass_manager.add_memcpy_optimize_pass(); + pass_manager.add_dead_store_elimination_pass(); + pass_manager.add_bit_tracking_dce_pass(); + pass_manager.add_instruction_combining_pass(); + pass_manager.add_reassociate_pass(); + pass_manager.add_cfg_simplification_pass(); + pass_manager.add_slp_vectorize_pass(); + pass_manager.add_early_cse_pass(); + + pass_manager.run_on(&module); + + if let Some(ref callbacks) = config.callbacks { + callbacks.postopt_ir(&function, &module); + } + + Ok(module) + } + + pub fn translate( + &self, + wasm_module: &ModuleInfo, + module_translation: &ModuleTranslationState, + local_func_index: &LocalFunctionIndex, + function_body: &FunctionBodyData, + config: &LLVM, + memory_styles: &PrimaryMap, + table_styles: &PrimaryMap, + symbol_registry: &dyn SymbolRegistry, + ) -> Result { + let module = self.translate_to_module( + wasm_module, + module_translation, + local_func_index, + function_body, + config, + memory_styles, + table_styles, + symbol_registry, + )?; + let function = CompiledKind::Local(*local_func_index); + let target_machine = &self.target_machine; + let memory_buffer = target_machine + .write_to_memory_buffer(&module, FileType::Object) + .unwrap(); + + if let Some(ref callbacks) = config.callbacks { + callbacks.obj_memory_buffer(&function, &memory_buffer); + } + + let mem_buf_slice = memory_buffer.as_slice(); + load_object_file( + mem_buf_slice, + FUNCTION_SECTION, + RelocationTarget::LocalFunc(*local_func_index), + |name: &str| { + Ok( + if let Some(Symbol::LocalFunction(local_func_index)) = + symbol_registry.name_to_symbol(name) + { + Some(RelocationTarget::LocalFunc(local_func_index)) + } else { + None + }, + ) + }, + ) + } +} + +impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> { + // Create a vector where each lane contains the same value. + fn splat_vector( + &self, + value: BasicValueEnum<'ctx>, + vec_ty: VectorType<'ctx>, + ) -> VectorValue<'ctx> { + // Use insert_element to insert the element into an undef vector, then use + // shuffle vector to copy that lane to all lanes. + self.builder.build_shuffle_vector( + self.builder.build_insert_element( + vec_ty.get_undef(), + value, + self.intrinsics.i32_zero, + "", + ), + vec_ty.get_undef(), + self.intrinsics + .i32_ty + .vec_type(vec_ty.get_size()) + .const_zero(), + "", + ) + } + + // Convert floating point vector to integer and saturate when out of range. + // https://github.com/WebAssembly/nontrapping-float-to-int-conversions/blob/master/proposals/nontrapping-float-to-int-conversion/Overview.md + fn trunc_sat>( + &self, + fvec_ty: T, + ivec_ty: T::MathConvType, + lower_bound: u64, // Exclusive (least representable value) + upper_bound: u64, // Exclusive (greatest representable value) + int_min_value: u64, + int_max_value: u64, + value: IntValue<'ctx>, + ) -> VectorValue<'ctx> { + // a) Compare vector with itself to identify NaN lanes. + // b) Compare vector with splat of inttofp(upper_bound) to identify + // lanes that need to saturate to max. + // c) Compare vector with splat of inttofp(lower_bound) to identify + // lanes that need to saturate to min. + // d) Use vector select (not shuffle) to pick from either the + // splat vector or the input vector depending on whether the + // comparison indicates that we have an unrepresentable value. 
Replace + // unrepresentable values with zero. + // e) Now that the value is safe, fpto[su]i it. + // f) Use our previous comparison results to replace certain zeros with + // int_min or int_max. + + let fvec_ty = fvec_ty.as_basic_type_enum().into_vector_type(); + let ivec_ty = ivec_ty.as_basic_type_enum().into_vector_type(); + let fvec_element_ty = fvec_ty.get_element_type().into_float_type(); + let ivec_element_ty = ivec_ty.get_element_type().into_int_type(); + + let is_signed = int_min_value != 0; + let int_min_value = self.splat_vector( + ivec_element_ty + .const_int(int_min_value, is_signed) + .as_basic_value_enum(), + ivec_ty, + ); + let int_max_value = self.splat_vector( + ivec_element_ty + .const_int(int_max_value, is_signed) + .as_basic_value_enum(), + ivec_ty, + ); + let lower_bound = if is_signed { + self.builder.build_signed_int_to_float( + ivec_element_ty.const_int(lower_bound, is_signed), + fvec_element_ty, + "", + ) + } else { + self.builder.build_unsigned_int_to_float( + ivec_element_ty.const_int(lower_bound, is_signed), + fvec_element_ty, + "", + ) + }; + let upper_bound = if is_signed { + self.builder.build_signed_int_to_float( + ivec_element_ty.const_int(upper_bound, is_signed), + fvec_element_ty, + "", + ) + } else { + self.builder.build_unsigned_int_to_float( + ivec_element_ty.const_int(upper_bound, is_signed), + fvec_element_ty, + "", + ) + }; + + let value = self + .builder + .build_bitcast(value, fvec_ty, "") + .into_vector_value(); + let zero = fvec_ty.const_zero(); + let lower_bound = self.splat_vector(lower_bound.as_basic_value_enum(), fvec_ty); + let upper_bound = self.splat_vector(upper_bound.as_basic_value_enum(), fvec_ty); + let nan_cmp = self + .builder + .build_float_compare(FloatPredicate::UNO, value, zero, "nan"); + let above_upper_bound_cmp = self.builder.build_float_compare( + FloatPredicate::OGT, + value, + upper_bound, + "above_upper_bound", + ); + let below_lower_bound_cmp = self.builder.build_float_compare( + FloatPredicate::OLT, + value, + lower_bound, + "below_lower_bound", + ); + let not_representable = self.builder.build_or( + self.builder.build_or(nan_cmp, above_upper_bound_cmp, ""), + below_lower_bound_cmp, + "not_representable_as_int", + ); + let value = self + .builder + .build_select(not_representable, zero, value, "safe_to_convert") + .into_vector_value(); + let value = if is_signed { + self.builder + .build_float_to_signed_int(value, ivec_ty, "as_int") + } else { + self.builder + .build_float_to_unsigned_int(value, ivec_ty, "as_int") + }; + let value = self + .builder + .build_select(above_upper_bound_cmp, int_max_value, value, "") + .into_vector_value(); + self.builder + .build_select(below_lower_bound_cmp, int_min_value, value, "") + .into_vector_value() + } + + // Convert floating point vector to integer and saturate when out of range. 
+ // https://github.com/WebAssembly/nontrapping-float-to-int-conversions/blob/master/proposals/nontrapping-float-to-int-conversion/Overview.md + fn trunc_sat_into_int>( + &self, + fvec_ty: T, + ivec_ty: T::MathConvType, + lower_bound: u64, // Exclusive (least representable value) + upper_bound: u64, // Exclusive (greatest representable value) + int_min_value: u64, + int_max_value: u64, + value: IntValue<'ctx>, + ) -> IntValue<'ctx> { + let res = self.trunc_sat( + fvec_ty, + ivec_ty, + lower_bound, + upper_bound, + int_min_value, + int_max_value, + value, + ); + self.builder + .build_bitcast(res, self.intrinsics.i128_ty, "") + .into_int_value() + } + + // Convert floating point vector to integer and saturate when out of range. + // https://github.com/WebAssembly/nontrapping-float-to-int-conversions/blob/master/proposals/nontrapping-float-to-int-conversion/Overview.md + fn trunc_sat_scalar( + &self, + int_ty: IntType<'ctx>, + lower_bound: u64, // Exclusive (least representable value) + upper_bound: u64, // Exclusive (greatest representable value) + int_min_value: u64, + int_max_value: u64, + value: FloatValue<'ctx>, + ) -> IntValue<'ctx> { + // TODO: this is a scalarized version of the process in trunc_sat. Either + // we should merge with trunc_sat, or we should simplify this function. + + // a) Compare value with itself to identify NaN. + // b) Compare value inttofp(upper_bound) to identify values that need to + // saturate to max. + // c) Compare value with inttofp(lower_bound) to identify values that need + // to saturate to min. + // d) Use select to pick from either zero or the input vector depending on + // whether the comparison indicates that we have an unrepresentable + // value. + // e) Now that the value is safe, fpto[su]i it. + // f) Use our previous comparison results to replace certain zeros with + // int_min or int_max. 
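+        // Worked example (illustrative), for i32.trunc_sat_f32_s:
+        //   NaN   -> unordered in (a), replaced by zero in (d) -> 0
+        //   3e10  -> above the upper bound in (b)              -> i32::MAX
+        //   -3e10 -> below the lower bound in (c)              -> i32::MIN
+        //   1.9   -> representable; fptosi in (e) truncates    -> 1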
+ + let is_signed = int_min_value != 0; + let int_min_value = int_ty.const_int(int_min_value, is_signed); + let int_max_value = int_ty.const_int(int_max_value, is_signed); + + let lower_bound = if is_signed { + self.builder.build_signed_int_to_float( + int_ty.const_int(lower_bound, is_signed), + value.get_type(), + "", + ) + } else { + self.builder.build_unsigned_int_to_float( + int_ty.const_int(lower_bound, is_signed), + value.get_type(), + "", + ) + }; + let upper_bound = if is_signed { + self.builder.build_signed_int_to_float( + int_ty.const_int(upper_bound, is_signed), + value.get_type(), + "", + ) + } else { + self.builder.build_unsigned_int_to_float( + int_ty.const_int(upper_bound, is_signed), + value.get_type(), + "", + ) + }; + + let zero = value.get_type().const_zero(); + + let nan_cmp = self + .builder + .build_float_compare(FloatPredicate::UNO, value, zero, "nan"); + let above_upper_bound_cmp = self.builder.build_float_compare( + FloatPredicate::OGT, + value, + upper_bound, + "above_upper_bound", + ); + let below_lower_bound_cmp = self.builder.build_float_compare( + FloatPredicate::OLT, + value, + lower_bound, + "below_lower_bound", + ); + let not_representable = self.builder.build_or( + self.builder.build_or(nan_cmp, above_upper_bound_cmp, ""), + below_lower_bound_cmp, + "not_representable_as_int", + ); + let value = self + .builder + .build_select(not_representable, zero, value, "safe_to_convert") + .into_float_value(); + let value = if is_signed { + self.builder + .build_float_to_signed_int(value, int_ty, "as_int") + } else { + self.builder + .build_float_to_unsigned_int(value, int_ty, "as_int") + }; + let value = self + .builder + .build_select(above_upper_bound_cmp, int_max_value, value, "") + .into_int_value(); + let value = self + .builder + .build_select(below_lower_bound_cmp, int_min_value, value, "") + .into_int_value(); + self.builder + .build_bitcast(value, int_ty, "") + .into_int_value() + } + + fn trap_if_not_representable_as_int( + &self, + lower_bound: u64, // Inclusive (not a trapping value) + upper_bound: u64, // Inclusive (not a trapping value) + value: FloatValue, + ) { + let float_ty = value.get_type(); + let int_ty = if float_ty == self.intrinsics.f32_ty { + self.intrinsics.i32_ty + } else { + self.intrinsics.i64_ty + }; + + let lower_bound = self + .builder + .build_bitcast(int_ty.const_int(lower_bound, false), float_ty, "") + .into_float_value(); + let upper_bound = self + .builder + .build_bitcast(int_ty.const_int(upper_bound, false), float_ty, "") + .into_float_value(); + + // The 'U' in the float predicate is short for "unordered" which means that + // the comparison will compare true if either operand is a NaN. Thus, NaNs + // are out of bounds. 
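+        // E.g. `NaN UGT upper_bound` evaluates to true, so a NaN input
+        // always takes the out-of-bounds path and reaches the trap below.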
+ let above_upper_bound_cmp = self.builder.build_float_compare( + FloatPredicate::UGT, + value, + upper_bound, + "above_upper_bound", + ); + let below_lower_bound_cmp = self.builder.build_float_compare( + FloatPredicate::ULT, + value, + lower_bound, + "below_lower_bound", + ); + let out_of_bounds = self.builder.build_or( + above_upper_bound_cmp, + below_lower_bound_cmp, + "out_of_bounds", + ); + + let failure_block = self + .context + .append_basic_block(self.function, "conversion_failure_block"); + let continue_block = self + .context + .append_basic_block(self.function, "conversion_success_block"); + + self.builder + .build_conditional_branch(out_of_bounds, failure_block, continue_block); + self.builder.position_at_end(failure_block); + let is_nan = self + .builder + .build_float_compare(FloatPredicate::UNO, value, value, "is_nan"); + let trap_code = self.builder.build_select( + is_nan, + self.intrinsics.trap_bad_conversion_to_integer, + self.intrinsics.trap_illegal_arithmetic, + "", + ); + self.builder + .build_call(self.intrinsics.throw_trap, &[trap_code.into()], "throw"); + self.builder.build_unreachable(); + self.builder.position_at_end(continue_block); + } + + fn trap_if_zero_or_overflow(&self, left: IntValue, right: IntValue) { + let int_type = left.get_type(); + + let (min_value, neg_one_value) = if int_type == self.intrinsics.i32_ty { + let min_value = int_type.const_int(i32::min_value() as u64, false); + let neg_one_value = int_type.const_int(-1i32 as u32 as u64, false); + (min_value, neg_one_value) + } else if int_type == self.intrinsics.i64_ty { + let min_value = int_type.const_int(i64::min_value() as u64, false); + let neg_one_value = int_type.const_int(-1i64 as u64, false); + (min_value, neg_one_value) + } else { + unreachable!() + }; + + let divisor_is_zero = self.builder.build_int_compare( + IntPredicate::EQ, + right, + int_type.const_zero(), + "divisor_is_zero", + ); + let should_trap = self.builder.build_or( + divisor_is_zero, + self.builder.build_and( + self.builder + .build_int_compare(IntPredicate::EQ, left, min_value, "left_is_min"), + self.builder.build_int_compare( + IntPredicate::EQ, + right, + neg_one_value, + "right_is_neg_one", + ), + "div_will_overflow", + ), + "div_should_trap", + ); + + let should_trap = self + .builder + .build_call( + self.intrinsics.expect_i1, + &[ + should_trap.into(), + self.intrinsics.i1_ty.const_zero().into(), + ], + "should_trap_expect", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let shouldnt_trap_block = self + .context + .append_basic_block(self.function, "shouldnt_trap_block"); + let should_trap_block = self + .context + .append_basic_block(self.function, "should_trap_block"); + self.builder + .build_conditional_branch(should_trap, should_trap_block, shouldnt_trap_block); + self.builder.position_at_end(should_trap_block); + let trap_code = self.builder.build_select( + divisor_is_zero, + self.intrinsics.trap_integer_division_by_zero, + self.intrinsics.trap_illegal_arithmetic, + "", + ); + self.builder + .build_call(self.intrinsics.throw_trap, &[trap_code.into()], "throw"); + self.builder.build_unreachable(); + self.builder.position_at_end(shouldnt_trap_block); + } + + fn trap_if_zero(&self, value: IntValue) { + let int_type = value.get_type(); + let should_trap = self.builder.build_int_compare( + IntPredicate::EQ, + value, + int_type.const_zero(), + "divisor_is_zero", + ); + + let should_trap = self + .builder + .build_call( + self.intrinsics.expect_i1, + &[ + should_trap.into(), + 
self.intrinsics.i1_ty.const_zero().into(), + ], + "should_trap_expect", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let shouldnt_trap_block = self + .context + .append_basic_block(self.function, "shouldnt_trap_block"); + let should_trap_block = self + .context + .append_basic_block(self.function, "should_trap_block"); + self.builder + .build_conditional_branch(should_trap, should_trap_block, shouldnt_trap_block); + self.builder.position_at_end(should_trap_block); + self.builder.build_call( + self.intrinsics.throw_trap, + &[self.intrinsics.trap_integer_division_by_zero.into()], + "throw", + ); + self.builder.build_unreachable(); + self.builder.position_at_end(shouldnt_trap_block); + } + + fn v128_into_int_vec( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + int_vec_ty: VectorType<'ctx>, + ) -> (VectorValue<'ctx>, ExtraInfo) { + let (value, info) = if info.has_pending_f32_nan() { + let value = self + .builder + .build_bitcast(value, self.intrinsics.f32x4_ty, ""); + (self.canonicalize_nans(value), info.strip_pending()) + } else if info.has_pending_f64_nan() { + let value = self + .builder + .build_bitcast(value, self.intrinsics.f64x2_ty, ""); + (self.canonicalize_nans(value), info.strip_pending()) + } else { + (value, info) + }; + ( + self.builder + .build_bitcast(value, int_vec_ty, "") + .into_vector_value(), + info, + ) + } + + fn v128_into_i8x16( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> (VectorValue<'ctx>, ExtraInfo) { + self.v128_into_int_vec(value, info, self.intrinsics.i8x16_ty) + } + + fn v128_into_i16x8( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> (VectorValue<'ctx>, ExtraInfo) { + self.v128_into_int_vec(value, info, self.intrinsics.i16x8_ty) + } + + fn v128_into_i32x4( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> (VectorValue<'ctx>, ExtraInfo) { + self.v128_into_int_vec(value, info, self.intrinsics.i32x4_ty) + } + + fn v128_into_i64x2( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> (VectorValue<'ctx>, ExtraInfo) { + self.v128_into_int_vec(value, info, self.intrinsics.i64x2_ty) + } + + // If the value is pending a 64-bit canonicalization, do it now. + // Return a f32x4 vector. + fn v128_into_f32x4( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> (VectorValue<'ctx>, ExtraInfo) { + let (value, info) = if info.has_pending_f64_nan() { + let value = self + .builder + .build_bitcast(value, self.intrinsics.f64x2_ty, ""); + (self.canonicalize_nans(value), info.strip_pending()) + } else { + (value, info) + }; + ( + self.builder + .build_bitcast(value, self.intrinsics.f32x4_ty, "") + .into_vector_value(), + info, + ) + } + + // If the value is pending a 32-bit canonicalization, do it now. + // Return a f64x2 vector. 
+ fn v128_into_f64x2( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> (VectorValue<'ctx>, ExtraInfo) { + let (value, info) = if info.has_pending_f32_nan() { + let value = self + .builder + .build_bitcast(value, self.intrinsics.f32x4_ty, ""); + (self.canonicalize_nans(value), info.strip_pending()) + } else { + (value, info) + }; + ( + self.builder + .build_bitcast(value, self.intrinsics.f64x2_ty, "") + .into_vector_value(), + info, + ) + } + + fn apply_pending_canonicalization( + &self, + value: BasicValueEnum<'ctx>, + info: ExtraInfo, + ) -> BasicValueEnum<'ctx> { + if !self.config.enable_nan_canonicalization { + return value; + } + + if info.has_pending_f32_nan() { + if value.get_type().is_vector_type() + || value.get_type() == self.intrinsics.i128_ty.as_basic_type_enum() + { + let ty = value.get_type(); + let value = self + .builder + .build_bitcast(value, self.intrinsics.f32x4_ty, ""); + let value = self.canonicalize_nans(value); + self.builder.build_bitcast(value, ty, "") + } else { + self.canonicalize_nans(value) + } + } else if info.has_pending_f64_nan() { + if value.get_type().is_vector_type() + || value.get_type() == self.intrinsics.i128_ty.as_basic_type_enum() + { + let ty = value.get_type(); + let value = self + .builder + .build_bitcast(value, self.intrinsics.f64x2_ty, ""); + let value = self.canonicalize_nans(value); + self.builder.build_bitcast(value, ty, "") + } else { + self.canonicalize_nans(value) + } + } else { + value + } + } + + // Replaces any NaN with the canonical QNaN, otherwise leaves the value alone. + fn canonicalize_nans(&self, value: BasicValueEnum<'ctx>) -> BasicValueEnum<'ctx> { + if !self.config.enable_nan_canonicalization { + return value; + } + + let f_ty = value.get_type(); + if f_ty.is_vector_type() { + let value = value.into_vector_value(); + let f_ty = f_ty.into_vector_type(); + let zero = f_ty.const_zero(); + let nan_cmp = self + .builder + .build_float_compare(FloatPredicate::UNO, value, zero, "nan"); + let canonical_qnan = f_ty + .get_element_type() + .into_float_type() + .const_float(std::f64::NAN); + let canonical_qnan = self.splat_vector(canonical_qnan.as_basic_value_enum(), f_ty); + self.builder + .build_select(nan_cmp, canonical_qnan, value, "") + .as_basic_value_enum() + } else { + let value = value.into_float_value(); + let f_ty = f_ty.into_float_type(); + let zero = f_ty.const_zero(); + let nan_cmp = self + .builder + .build_float_compare(FloatPredicate::UNO, value, zero, "nan"); + let canonical_qnan = f_ty.const_float(std::f64::NAN); + self.builder + .build_select(nan_cmp, canonical_qnan, value, "") + .as_basic_value_enum() + } + } + + fn quiet_nan(&self, value: BasicValueEnum<'ctx>) -> BasicValueEnum<'ctx> { + let intrinsic = if value + .get_type() + .eq(&self.intrinsics.f32_ty.as_basic_type_enum()) + { + Some(self.intrinsics.add_f32) + } else if value + .get_type() + .eq(&self.intrinsics.f64_ty.as_basic_type_enum()) + { + Some(self.intrinsics.add_f64) + } else if value + .get_type() + .eq(&self.intrinsics.f32x4_ty.as_basic_type_enum()) + { + Some(self.intrinsics.add_f32x4) + } else if value + .get_type() + .eq(&self.intrinsics.f64x2_ty.as_basic_type_enum()) + { + Some(self.intrinsics.add_f64x2) + } else { + None + }; + + match intrinsic { + Some(intrinsic) => self + .builder + .build_call( + intrinsic, + &[ + value.into(), + value.get_type().const_zero().into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(), + None => value, + } + } + + 
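// (Illustrative) `quiet_nan` adds 0.0 through the constrained-FP add
+    // intrinsics: IEEE-754 arithmetic on a signaling NaN returns a quiet
+    // NaN, and the constrained form keeps LLVM from folding the no-op add
+    // away, so sNaN payloads are reliably quieted.
+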
// If this memory access must trap when out of bounds (i.e. it is a memory
+    // access written in the user program as opposed to one used by our VM)
+    // then mark that it can't be deleted.
+    fn mark_memaccess_nodelete(
+        &mut self,
+        memory_index: MemoryIndex,
+        memaccess: InstructionValue<'ctx>,
+    ) -> Result<(), CompileError> {
+        if let MemoryCache::Static { base_ptr: _ } = self.ctx.memory(
+            memory_index,
+            self.intrinsics,
+            self.module,
+            self.memory_styles,
+        ) {
+            // The best we've got is `volatile`.
+            // TODO: convert unwrap fail to CompileError
+            memaccess.set_volatile(true).unwrap();
+        }
+        Ok(())
+    }
+
+    fn annotate_user_memaccess(
+        &mut self,
+        memory_index: MemoryIndex,
+        _memarg: &MemoryImmediate,
+        alignment: u32,
+        memaccess: InstructionValue<'ctx>,
+    ) -> Result<(), CompileError> {
+        match memaccess.get_opcode() {
+            InstructionOpcode::Load | InstructionOpcode::Store => {
+                memaccess.set_alignment(alignment).unwrap();
+            }
+            _ => {}
+        };
+        self.mark_memaccess_nodelete(memory_index, memaccess)?;
+        tbaa_label(
+            &self.module,
+            self.intrinsics,
+            format!("memory {}", memory_index.as_u32()),
+            memaccess,
+        );
+        Ok(())
+    }
+
+    fn resolve_memory_ptr(
+        &mut self,
+        memory_index: MemoryIndex,
+        memarg: &MemoryImmediate,
+        ptr_ty: PointerType<'ctx>,
+        var_offset: IntValue<'ctx>,
+        value_size: usize,
+    ) -> Result<PointerValue<'ctx>, CompileError> {
+        let builder = &self.builder;
+        let intrinsics = &self.intrinsics;
+        let context = &self.context;
+        let function = &self.function;
+
+        // Compute the offset into the storage.
+        let imm_offset = intrinsics.i64_ty.const_int(memarg.offset as u64, false);
+        let var_offset = builder.build_int_z_extend(var_offset, intrinsics.i64_ty, "");
+        let offset = builder.build_int_add(var_offset, imm_offset, "");
+
+        // Look up the memory base (as pointer) and bounds (as unsigned integer).
+        let base_ptr =
+            match self
+                .ctx
+                .memory(memory_index, intrinsics, self.module, self.memory_styles)
+            {
+                MemoryCache::Dynamic {
+                    ptr_to_base_ptr,
+                    ptr_to_current_length,
+                } => {
+                    // Bounds check it.
+                    let minimum = self.wasm_module.memories[memory_index].minimum;
+                    let value_size_v = intrinsics.i64_ty.const_int(value_size as u64, false);
+                    let ptr_in_bounds = if offset.is_const() {
+                        // When the offset is constant, if it's below the minimum
+                        // memory size, we've statically shown that it's safe.
+                        let load_offset_end = offset.const_add(value_size_v);
+                        let ptr_in_bounds = load_offset_end.const_int_compare(
+                            IntPredicate::ULE,
+                            intrinsics.i64_ty.const_int(minimum.bytes().0 as u64, false),
+                        );
+                        if ptr_in_bounds.get_zero_extended_constant() == Some(1) {
+                            Some(ptr_in_bounds)
+                        } else {
+                            None
+                        }
+                    } else {
+                        None
+                    }
+                    .unwrap_or_else(|| {
+                        let load_offset_end = builder.build_int_add(offset, value_size_v, "");
+
+                        let current_length = builder
+                            .build_load(ptr_to_current_length, "")
+                            .into_int_value();
+                        tbaa_label(
+                            self.module,
+                            self.intrinsics,
+                            format!("memory {} length", memory_index.as_u32()),
+                            current_length.as_instruction_value().unwrap(),
+                        );
+                        let current_length =
+                            builder.build_int_z_extend(current_length, intrinsics.i64_ty, "");
+
+                        builder.build_int_compare(
+                            IntPredicate::ULE,
+                            load_offset_end,
+                            current_length,
+                            "",
+                        )
+                    });
+                    if !ptr_in_bounds.is_constant_int()
+                        || ptr_in_bounds.get_zero_extended_constant().unwrap() != 1
+                    {
+                        // LLVM may have folded this into 'i1 true' in which case we know
+                        // the pointer is in bounds. LLVM may also have folded it into a
+                        // constant expression, not known to be either true or false yet.
+ // If it's false, unknown-but-constant, or not-a-constant, emit a + // runtime bounds check. LLVM may yet succeed at optimizing it away. + let ptr_in_bounds = builder + .build_call( + intrinsics.expect_i1, + &[ + ptr_in_bounds.into(), + intrinsics.i1_ty.const_int(1, true).into(), + ], + "ptr_in_bounds_expect", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let in_bounds_continue_block = + context.append_basic_block(*function, "in_bounds_continue_block"); + let not_in_bounds_block = + context.append_basic_block(*function, "not_in_bounds_block"); + builder.build_conditional_branch( + ptr_in_bounds, + in_bounds_continue_block, + not_in_bounds_block, + ); + builder.position_at_end(not_in_bounds_block); + builder.build_call( + intrinsics.throw_trap, + &[intrinsics.trap_memory_oob.into()], + "throw", + ); + builder.build_unreachable(); + builder.position_at_end(in_bounds_continue_block); + } + let ptr_to_base = builder.build_load(ptr_to_base_ptr, "").into_pointer_value(); + tbaa_label( + self.module, + self.intrinsics, + format!("memory base_ptr {}", memory_index.as_u32()), + ptr_to_base.as_instruction_value().unwrap(), + ); + ptr_to_base + } + MemoryCache::Static { base_ptr } => base_ptr, + }; + let value_ptr = unsafe { builder.build_gep(base_ptr, &[offset], "") }; + Ok(builder + .build_bitcast(value_ptr, ptr_ty, "") + .into_pointer_value()) + } + + fn trap_if_misaligned(&self, memarg: &MemoryImmediate, ptr: PointerValue<'ctx>) { + let align = memarg.align; + let value = self + .builder + .build_ptr_to_int(ptr, self.intrinsics.i64_ty, ""); + let and = self.builder.build_and( + value, + self.intrinsics.i64_ty.const_int((align - 1).into(), false), + "misaligncheck", + ); + let aligned = + self.builder + .build_int_compare(IntPredicate::EQ, and, self.intrinsics.i64_zero, ""); + let aligned = self + .builder + .build_call( + self.intrinsics.expect_i1, + &[ + aligned.into(), + self.intrinsics.i1_ty.const_int(1, false).into(), + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let continue_block = self + .context + .append_basic_block(self.function, "aligned_access_continue_block"); + let not_aligned_block = self + .context + .append_basic_block(self.function, "misaligned_trap_block"); + self.builder + .build_conditional_branch(aligned, continue_block, not_aligned_block); + + self.builder.position_at_end(not_aligned_block); + self.builder.build_call( + self.intrinsics.throw_trap, + &[self.intrinsics.trap_unaligned_atomic.into()], + "throw", + ); + self.builder.build_unreachable(); + + self.builder.position_at_end(continue_block); + } + + fn finalize(&mut self, wasm_fn_type: &FunctionType) -> Result<(), CompileError> { + let func_type = self.function.get_type(); + + let results = self.state.popn_save_extra(wasm_fn_type.results().len())?; + let results = results + .into_iter() + .map(|(v, i)| self.apply_pending_canonicalization(v, i)); + if wasm_fn_type.results().is_empty() { + self.builder.build_return(None); + } else if self.abi.is_sret(wasm_fn_type)? 
{
+            let sret = self
+                .function
+                .get_first_param()
+                .unwrap()
+                .into_pointer_value();
+            let mut struct_value = sret
+                .get_type()
+                .get_element_type()
+                .into_struct_type()
+                .get_undef();
+            for (idx, value) in results.enumerate() {
+                let value = self.builder.build_bitcast(
+                    value,
+                    type_to_llvm(&self.intrinsics, wasm_fn_type.results()[idx])?,
+                    "",
+                );
+                struct_value = self
+                    .builder
+                    .build_insert_value(struct_value, value, idx as u32, "")
+                    .unwrap()
+                    .into_struct_value();
+            }
+            self.builder.build_store(sret, struct_value);
+            self.builder.build_return(None);
+        } else {
+            self.builder
+                .build_return(Some(&self.abi.pack_values_for_register_return(
+                    &self.intrinsics,
+                    &self.builder,
+                    &results.collect::<Vec<_>>(),
+                    &func_type,
+                )?));
+        }
+        Ok(())
+    }
+}
+
+/*
+fn emit_stack_map<'ctx>(
+    intrinsics: &Intrinsics<'ctx>,
+    builder: &Builder<'ctx>,
+    local_function_id: usize,
+    target: &mut StackmapRegistry,
+    kind: StackmapEntryKind,
+    locals: &[PointerValue],
+    state: &State<'ctx>,
+    _ctx: &mut CtxType<'ctx>,
+    opcode_offset: usize,
+) {
+    let stackmap_id = target.entries.len();
+
+    let mut params = Vec::with_capacity(2 + locals.len() + state.stack.len());
+
+    params.push(
+        intrinsics
+            .i64_ty
+            .const_int(stackmap_id as u64, false)
+            .as_basic_value_enum(),
+    );
+    params.push(intrinsics.i32_ty.const_zero().as_basic_value_enum());
+
+    let locals: Vec<_> = locals.iter().map(|x| x.as_basic_value_enum()).collect();
+    let mut value_semantics: Vec<ValueSemantic> =
+        Vec::with_capacity(locals.len() + state.stack.len());
+
+    params.extend_from_slice(&locals);
+    value_semantics.extend((0..locals.len()).map(ValueSemantic::WasmLocal));
+
+    params.extend(state.stack.iter().map(|x| x.0));
+    value_semantics.extend((0..state.stack.len()).map(ValueSemantic::WasmStack));
+
+    // FIXME: Information needed for Abstract -> Runtime state transform is not fully preserved
+    // to accelerate compilation and reduce memory usage. Check this again when we try to support
+    // "full" LLVM OSR.
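+    // (Illustrative note, not part of this change.) The "+ 2" in the assertion
+    // below accounts for the two header operands pushed above for
+    // `llvm.experimental.stackmap`: the stackmap id and the shadow-byte count;
+    // only the remaining operands carry value semantics.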
+
+    assert_eq!(params.len(), value_semantics.len() + 2);
+
+    builder.build_call(intrinsics.experimental_stackmap, &params, "");
+
+    target.entries.push(StackmapEntry {
+        kind,
+        local_function_id,
+        local_count: locals.len(),
+        stack_count: state.stack.len(),
+        opcode_offset,
+        value_semantics,
+        is_start: true,
+    });
+}
+
+fn finalize_opcode_stack_map<'ctx>(
+    intrinsics: &Intrinsics<'ctx>,
+    builder: &Builder<'ctx>,
+    local_function_id: usize,
+    target: &mut StackmapRegistry,
+    kind: StackmapEntryKind,
+    opcode_offset: usize,
+) {
+    let stackmap_id = target.entries.len();
+    builder.build_call(
+        intrinsics.experimental_stackmap,
+        &[
+            intrinsics
+                .i64_ty
+                .const_int(stackmap_id as u64, false)
+                .as_basic_value_enum(),
+            intrinsics.i32_ty.const_zero().as_basic_value_enum(),
+        ],
+        "opcode_stack_map_end",
+    );
+    target.entries.push(StackmapEntry {
+        kind,
+        local_function_id,
+        local_count: 0,
+        stack_count: 0,
+        opcode_offset,
+        value_semantics: vec![],
+        is_start: false,
+    });
+}
+ */
+
+pub struct LLVMFunctionCodeGenerator<'ctx, 'a> {
+    context: &'ctx Context,
+    builder: Builder<'ctx>,
+    alloca_builder: Builder<'ctx>,
+    intrinsics: &'a Intrinsics<'ctx>,
+    state: State<'ctx>,
+    function: FunctionValue<'ctx>,
+    locals: Vec<PointerValue<'ctx>>, // Contains params and locals
+    ctx: CtxType<'ctx, 'a>,
+    unreachable_depth: usize,
+    memory_styles: &'a PrimaryMap<MemoryIndex, MemoryStyle>,
+    _table_styles: &'a PrimaryMap<TableIndex, TableStyle>,
+
+    // This is support for stackmaps:
+    /*
+    stackmaps: Rc<RefCell<StackmapRegistry>>,
+    index: usize,
+    opcode_offset: usize,
+    track_state: bool,
+    */
+    module: &'a Module<'ctx>,
+    module_translation: &'a ModuleTranslationState,
+    wasm_module: &'a ModuleInfo,
+    symbol_registry: &'a dyn SymbolRegistry,
+    abi: &'a dyn Abi,
+    config: &'a LLVM,
+}
+
+impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
+    fn translate_operator(&mut self, op: Operator, _source_loc: u32) -> Result<(), CompileError> {
+        // TODO: remove this vmctx by moving everything into CtxType. Values
+        // computed off vmctx usually benefit from caching.
+        let vmctx = &self.ctx.basic().into_pointer_value();
+
+        //let opcode_offset: Option<usize> = None;
+
+        if !self.state.reachable {
+            match op {
+                Operator::Block { ty: _ } | Operator::Loop { ty: _ } | Operator::If { ty: _ } => {
+                    self.unreachable_depth += 1;
+                    return Ok(());
+                }
+                Operator::Else => {
+                    if self.unreachable_depth != 0 {
+                        return Ok(());
+                    }
+                }
+                Operator::End => {
+                    if self.unreachable_depth != 0 {
+                        self.unreachable_depth -= 1;
+                        return Ok(());
+                    }
+                }
+                _ => {
+                    return Ok(());
+                }
+            }
+        }
+
+        match op {
+            /***************************
+             * Control Flow instructions.
+             * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#control-flow-instructions
+             ***************************/
+            Operator::Block { ty } => {
+                let current_block = self
+                    .builder
+                    .get_insert_block()
+                    .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
+
+                let end_block = self.context.append_basic_block(self.function, "end");
+                self.builder.position_at_end(end_block);
+
+                let phis: SmallVec<[PhiValue<'ctx>; 1]> = self
+                    .module_translation
+                    .blocktype_params_results(ty)?
+                    .1
+                    .iter()
+                    .map(|&wp_ty| {
+                        wptype_to_type(wp_ty)
+                            .map_err(to_compile_error)
+                            .and_then(|wasm_ty| {
+                                type_to_llvm(self.intrinsics, wasm_ty)
+                                    .map(|ty| self.builder.build_phi(ty, ""))
+                            })
+                    })
+                    .collect::<Result<_, CompileError>>()?;
+
+                self.state.push_block(end_block, phis);
+                self.builder.position_at_end(current_block);
+            }
+            Operator::Loop { ty } => {
+                let loop_body = self.context.append_basic_block(self.function, "loop_body");
+                let loop_next = self.context.append_basic_block(self.function, "loop_outer");
+                let pre_loop_block = self.builder.get_insert_block().unwrap();
+
+                self.builder.build_unconditional_branch(loop_body);
+
+                self.builder.position_at_end(loop_next);
+                let blocktypes = self.module_translation.blocktype_params_results(ty)?;
+                let phis = blocktypes
+                    .1
+                    .iter()
+                    .map(|&wp_ty| {
+                        wptype_to_type(wp_ty)
+                            .map_err(to_compile_error)
+                            .and_then(|wasm_ty| {
+                                type_to_llvm(self.intrinsics, wasm_ty)
+                                    .map(|ty| self.builder.build_phi(ty, ""))
+                            })
+                    })
+                    .collect::<Result<SmallVec<[PhiValue<'ctx>; 1]>, CompileError>>()?;
+                self.builder.position_at_end(loop_body);
+                let loop_phis: SmallVec<[PhiValue<'ctx>; 1]> = blocktypes
+                    .0
+                    .iter()
+                    .map(|&wp_ty| {
+                        wptype_to_type(wp_ty)
+                            .map_err(to_compile_error)
+                            .and_then(|wasm_ty| {
+                                type_to_llvm(self.intrinsics, wasm_ty)
+                                    .map(|ty| self.builder.build_phi(ty, ""))
+                            })
+                    })
+                    .collect::<Result<_, CompileError>>()?;
+                for phi in loop_phis.iter().rev() {
+                    let (value, info) = self.state.pop1_extra()?;
+                    let value = self.apply_pending_canonicalization(value, info);
+                    phi.add_incoming(&[(&value, pre_loop_block)]);
+                }
+                for phi in &loop_phis {
+                    self.state.push1(phi.as_basic_value());
+                }
+
+                /*
+                if self.track_state {
+                    if let Some(offset) = opcode_offset {
+                        let mut stackmaps = self.stackmaps.borrow_mut();
+                        emit_stack_map(
+                            self.intrinsics,
+                            self.builder,
+                            self.index,
+                            &mut *stackmaps,
+                            StackmapEntryKind::Loop,
+                            &self.self.locals,
+                            state,
+                            ctx,
+                            offset,
+                        );
+                        let signal_mem = ctx.signal_mem();
+                        let iv = self.builder
+                            .build_store(signal_mem, self.context.i8_type().const_zero());
+                        // Any 'store' can be made volatile.
+                        iv.set_volatile(true).unwrap();
+                        finalize_opcode_stack_map(
+                            self.intrinsics,
+                            self.builder,
+                            self.index,
+                            &mut *stackmaps,
+                            StackmapEntryKind::Loop,
+                            offset,
+                        );
+                    }
+                }
+                */
+
+                self.state.push_loop(loop_body, loop_next, loop_phis, phis);
+            }
+            Operator::Br { relative_depth } => {
+                let frame = self.state.frame_at_depth(relative_depth)?;
+
+                let current_block = self
+                    .builder
+                    .get_insert_block()
+                    .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
+
+                let phis = if frame.is_loop() {
+                    frame.loop_body_phis()
+                } else {
+                    frame.phis()
+                };
+
+                let len = phis.len();
+                let values = self.state.peekn_extra(len)?;
+                let values = values
+                    .iter()
+                    .map(|(v, info)| self.apply_pending_canonicalization(*v, *info));
+
+                // For each result of the block we're branching to,
+                // pop a value off the value stack and load it into
+                // the corresponding phi.
+                for (phi, value) in phis.iter().zip(values) {
+                    phi.add_incoming(&[(&value, current_block)]);
+                }
+
+                self.builder.build_unconditional_branch(*frame.br_dest());
+
+                self.state.popn(len)?;
+                self.state.reachable = false;
+            }
+            Operator::BrIf { relative_depth } => {
+                let cond = self.state.pop1()?;
+                let frame = self.state.frame_at_depth(relative_depth)?;
+
+                let current_block = self
+                    .builder
+                    .get_insert_block()
+                    .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
+
+                let phis = if frame.is_loop() {
+                    frame.loop_body_phis()
+                } else {
+                    frame.phis()
+                };
+
+                let param_stack = self.state.peekn_extra(phis.len())?;
+                let param_stack = param_stack
+                    .iter()
+                    .map(|(v, info)| self.apply_pending_canonicalization(*v, *info));
+
+                for (phi, value) in phis.iter().zip(param_stack) {
+                    phi.add_incoming(&[(&value, current_block)]);
+                }
+
+                let else_block = self.context.append_basic_block(self.function, "else");
+
+                let cond_value = self.builder.build_int_compare(
+                    IntPredicate::NE,
+                    cond.into_int_value(),
+                    self.intrinsics.i32_zero,
+                    "",
+                );
+                self.builder
+                    .build_conditional_branch(cond_value, *frame.br_dest(), else_block);
+                self.builder.position_at_end(else_block);
+            }
+            Operator::BrTable { ref table } => {
+                let current_block = self
+                    .builder
+                    .get_insert_block()
+                    .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
+
+                let mut label_depths = table.targets().collect::<Result<Vec<_>, _>>()?;
+                let default_depth = label_depths.pop().unwrap().0;
+
+                let index = self.state.pop1()?;
+
+                let default_frame = self.state.frame_at_depth(default_depth)?;
+
+                let phis = if default_frame.is_loop() {
+                    default_frame.loop_body_phis()
+                } else {
+                    default_frame.phis()
+                };
+                let args = self.state.peekn(phis.len())?;
+
+                for (phi, value) in phis.iter().zip(args.iter()) {
+                    phi.add_incoming(&[(value, current_block)]);
+                }
+
+                let cases: Vec<_> = label_depths
+                    .iter()
+                    .enumerate()
+                    .map(|(case_index, &(depth, _))| {
+                        let frame_result: Result<&ControlFrame, CompileError> =
+                            self.state.frame_at_depth(depth);
+                        let frame = match frame_result {
+                            Ok(v) => v,
+                            Err(e) => return Err(e),
+                        };
+                        let case_index_literal =
+                            self.context.i32_type().const_int(case_index as u64, false);
+                        let phis = if frame.is_loop() {
+                            frame.loop_body_phis()
+                        } else {
+                            frame.phis()
+                        };
+                        for (phi, value) in phis.iter().zip(args.iter()) {
+                            phi.add_incoming(&[(value, current_block)]);
+                        }
+
+                        Ok((case_index_literal, *frame.br_dest()))
+                    })
+                    .collect::<Result<_, _>>()?;
+
+                self.builder.build_switch(
+                    index.into_int_value(),
+                    *default_frame.br_dest(),
+                    &cases[..],
+                );
+
+                let args_len = args.len();
+                self.state.popn(args_len)?;
+                self.state.reachable = false;
+            }
+            Operator::If { ty } => {
+                let current_block = self
+                    .builder
+                    .get_insert_block()
+                    .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
+                let if_then_block = self.context.append_basic_block(self.function, "if_then");
+                let if_else_block = self.context.append_basic_block(self.function, "if_else");
+                let end_block = self.context.append_basic_block(self.function, "if_end");
+
+                let end_phis = {
+                    self.builder.position_at_end(end_block);
+
+                    let phis = self
+                        .module_translation
+                        .blocktype_params_results(ty)?
+                        .1
+                        .iter()
+                        .map(|&wp_ty| {
+                            wptype_to_type(wp_ty)
+                                .map_err(to_compile_error)
+                                .and_then(|wasm_ty| {
+                                    type_to_llvm(self.intrinsics, wasm_ty)
+                                        .map(|ty| self.builder.build_phi(ty, ""))
+                                })
+                        })
+                        .collect::<Result<SmallVec<[PhiValue<'ctx>; 1]>, CompileError>>()?;
+
+                    self.builder.position_at_end(current_block);
+                    phis
+                };
+
+                let cond = self.state.pop1()?;
+
+                let cond_value = self.builder.build_int_compare(
+                    IntPredicate::NE,
+                    cond.into_int_value(),
+                    self.intrinsics.i32_zero,
+                    "",
+                );
+
+                self.builder
+                    .build_conditional_branch(cond_value, if_then_block, if_else_block);
+                self.builder.position_at_end(if_else_block);
+                let block_param_types = self
+                    .module_translation
+                    .blocktype_params_results(ty)?
+                    .0
+                    .iter()
+                    .map(|&wp_ty| {
+                        wptype_to_type(wp_ty)
+                            .map_err(to_compile_error)
+                            .and_then(|wasm_ty| type_to_llvm(self.intrinsics, wasm_ty))
+                    })
+                    .collect::<Result<Vec<_>, _>>()?;
+                let else_phis: SmallVec<[PhiValue<'ctx>; 1]> = block_param_types
+                    .iter()
+                    .map(|&ty| self.builder.build_phi(ty, ""))
+                    .collect();
+                self.builder.position_at_end(if_then_block);
+                let then_phis: SmallVec<[PhiValue<'ctx>; 1]> = block_param_types
+                    .iter()
+                    .map(|&ty| self.builder.build_phi(ty, ""))
+                    .collect();
+                for (else_phi, then_phi) in else_phis.iter().rev().zip(then_phis.iter().rev()) {
+                    let (value, info) = self.state.pop1_extra()?;
+                    let value = self.apply_pending_canonicalization(value, info);
+                    else_phi.add_incoming(&[(&value, current_block)]);
+                    then_phi.add_incoming(&[(&value, current_block)]);
+                }
+                for phi in then_phis.iter() {
+                    self.state.push1(phi.as_basic_value());
+                }
+
+                self.state.push_if(
+                    if_then_block,
+                    if_else_block,
+                    end_block,
+                    then_phis,
+                    else_phis,
+                    end_phis,
+                );
+            }
+            Operator::Else => {
+                if self.state.reachable {
+                    let frame = self.state.frame_at_depth(0)?;
+                    let current_block = self.builder.get_insert_block().ok_or_else(|| {
+                        CompileError::Codegen("not currently in a block".to_string())
+                    })?;
+
+                    for phi in frame.phis().to_vec().iter().rev() {
+                        let (value, info) = self.state.pop1_extra()?;
+                        let value = self.apply_pending_canonicalization(value, info);
+                        phi.add_incoming(&[(&value, current_block)])
+                    }
+
+                    let frame = self.state.frame_at_depth(0)?;
+                    self.builder.build_unconditional_branch(*frame.code_after());
+                }
+
+                let (if_else_block, if_else_state) = if let ControlFrame::IfElse {
+                    if_else,
+                    if_else_state,
+                    ..
+                } = self.state.frame_at_depth_mut(0)?
+                {
+                    (if_else, if_else_state)
+                } else {
+                    unreachable!()
+                };
+
+                *if_else_state = IfElseState::Else;
+
+                self.builder.position_at_end(*if_else_block);
+                self.state.reachable = true;
+
+                if let ControlFrame::IfElse { else_phis, .. } = self.state.frame_at_depth(0)? {
+                    // Push our own 'else' phi nodes to the stack.
+                    for phi in else_phis.clone().iter() {
+                        self.state.push1(phi.as_basic_value());
+                    }
+                };
+            }
+
+            Operator::End => {
+                let frame = self.state.pop_frame()?;
+                let current_block = self
+                    .builder
+                    .get_insert_block()
+                    .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
+
+                if self.state.reachable {
+                    for phi in frame.phis().iter().rev() {
+                        let (value, info) = self.state.pop1_extra()?;
+                        let value = self.apply_pending_canonicalization(value, info);
+                        phi.add_incoming(&[(&value, current_block)]);
+                    }
+
+                    self.builder.build_unconditional_branch(*frame.code_after());
+                }
+
+                if let ControlFrame::IfElse {
+                    if_else,
+                    next,
+                    if_else_state,
+                    else_phis,
+                    ..
+ } = &frame + { + if let IfElseState::If = if_else_state { + for (phi, else_phi) in frame.phis().iter().zip(else_phis.iter()) { + phi.add_incoming(&[(&else_phi.as_basic_value(), *if_else)]); + } + self.builder.position_at_end(*if_else); + self.builder.build_unconditional_branch(*next); + } + } + + self.builder.position_at_end(*frame.code_after()); + self.state.reset_stack(&frame); + + self.state.reachable = true; + + // Push each phi value to the value stack. + for phi in frame.phis() { + if phi.count_incoming() != 0 { + self.state.push1(phi.as_basic_value()); + } else { + let basic_ty = phi.as_basic_value().get_type(); + let placeholder_value = basic_ty.const_zero(); + self.state.push1(placeholder_value); + phi.as_instruction().erase_from_basic_block(); + } + } + } + Operator::Return => { + let current_block = self + .builder + .get_insert_block() + .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?; + + let frame = self.state.outermost_frame()?; + for phi in frame.phis().to_vec().iter().rev() { + let (arg, info) = self.state.pop1_extra()?; + let arg = self.apply_pending_canonicalization(arg, info); + phi.add_incoming(&[(&arg, current_block)]); + } + let frame = self.state.outermost_frame()?; + self.builder.build_unconditional_branch(*frame.br_dest()); + + self.state.reachable = false; + } + + Operator::Unreachable => { + // Emit an unreachable instruction. + // If llvm cannot prove that this is never reached, + // it will emit a `ud2` instruction on x86_64 arches. + + // Comment out this `if` block to allow spectests to pass. + // TODO: fix this + /* + if let Some(offset) = opcode_offset { + if self.track_state { + let mut stackmaps = self.stackmaps.borrow_mut(); + emit_stack_map( + self.intrinsics, + self.builder, + self.index, + &mut *stackmaps, + StackmapEntryKind::Trappable, + &self.self.locals, + state, + ctx, + offset, + ); + self.builder.build_call(self.intrinsics.trap, &[], "trap"); + finalize_opcode_stack_map( + self.intrinsics, + self.builder, + self.index, + &mut *stackmaps, + StackmapEntryKind::Trappable, + offset, + ); + } + } + */ + + self.builder.build_call( + self.intrinsics.throw_trap, + &[self.intrinsics.trap_unreachable.into()], + "throw", + ); + self.builder.build_unreachable(); + + self.state.reachable = false; + } + + /*************************** + * Basic instructions. + * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#basic-instructions + ***************************/ + Operator::Nop => { + // Do nothing. + } + Operator::Drop => { + self.state.pop1()?; + } + + // Generate const values. 
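+            // (Illustrative note, not part of this change.) For the constant
+            // operators below, `is_f32_arithmetic` / `is_f64_arithmetic` classify
+            // the constant's bit pattern; values that cannot carry a
+            // non-canonical NaN payload are tagged `arithmetic_f32` /
+            // `arithmetic_f64` in `ExtraInfo`, letting later float operations
+            // skip re-canonicalizing them.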
+ Operator::I32Const { value } => { + let i = self.intrinsics.i32_ty.const_int(value as u64, false); + let info = if is_f32_arithmetic(value as u32) { + ExtraInfo::arithmetic_f32() + } else { + Default::default() + }; + self.state.push1_extra(i, info); + } + Operator::I64Const { value } => { + let i = self.intrinsics.i64_ty.const_int(value as u64, false); + let info = if is_f64_arithmetic(value as u64) { + ExtraInfo::arithmetic_f64() + } else { + Default::default() + }; + self.state.push1_extra(i, info); + } + Operator::F32Const { value } => { + let bits = self.intrinsics.i32_ty.const_int(value.bits() as u64, false); + let info = if is_f32_arithmetic(value.bits()) { + ExtraInfo::arithmetic_f32() + } else { + Default::default() + }; + let f = self + .builder + .build_bitcast(bits, self.intrinsics.f32_ty, "f"); + self.state.push1_extra(f, info); + } + Operator::F64Const { value } => { + let bits = self.intrinsics.i64_ty.const_int(value.bits(), false); + let info = if is_f64_arithmetic(value.bits()) { + ExtraInfo::arithmetic_f64() + } else { + Default::default() + }; + let f = self + .builder + .build_bitcast(bits, self.intrinsics.f64_ty, "f"); + self.state.push1_extra(f, info); + } + Operator::V128Const { value } => { + let mut hi: [u8; 8] = Default::default(); + let mut lo: [u8; 8] = Default::default(); + hi.copy_from_slice(&value.bytes()[0..8]); + lo.copy_from_slice(&value.bytes()[8..16]); + let packed = [u64::from_le_bytes(hi), u64::from_le_bytes(lo)]; + let i = self + .intrinsics + .i128_ty + .const_int_arbitrary_precision(&packed); + let mut quad1: [u8; 4] = Default::default(); + let mut quad2: [u8; 4] = Default::default(); + let mut quad3: [u8; 4] = Default::default(); + let mut quad4: [u8; 4] = Default::default(); + quad1.copy_from_slice(&value.bytes()[0..4]); + quad2.copy_from_slice(&value.bytes()[4..8]); + quad3.copy_from_slice(&value.bytes()[8..12]); + quad4.copy_from_slice(&value.bytes()[12..16]); + let mut info: ExtraInfo = Default::default(); + if is_f32_arithmetic(u32::from_le_bytes(quad1)) + && is_f32_arithmetic(u32::from_le_bytes(quad2)) + && is_f32_arithmetic(u32::from_le_bytes(quad3)) + && is_f32_arithmetic(u32::from_le_bytes(quad4)) + { + info |= ExtraInfo::arithmetic_f32(); + } + if is_f64_arithmetic(packed[0]) && is_f64_arithmetic(packed[1]) { + info |= ExtraInfo::arithmetic_f64(); + } + self.state.push1_extra(i, info); + } + + Operator::I8x16Splat => { + let (v, i) = self.state.pop1_extra()?; + let v = v.into_int_value(); + let v = self + .builder + .build_int_truncate(v, self.intrinsics.i8_ty, ""); + let res = self.splat_vector(v.as_basic_value_enum(), self.intrinsics.i8x16_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + Operator::I16x8Splat => { + let (v, i) = self.state.pop1_extra()?; + let v = v.into_int_value(); + let v = self + .builder + .build_int_truncate(v, self.intrinsics.i16_ty, ""); + let res = self.splat_vector(v.as_basic_value_enum(), self.intrinsics.i16x8_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + Operator::I32x4Splat => { + let (v, i) = self.state.pop1_extra()?; + let res = self.splat_vector(v, self.intrinsics.i32x4_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + Operator::I64x2Splat => { + let (v, i) = self.state.pop1_extra()?; + let res = self.splat_vector(v, self.intrinsics.i64x2_ty); + let res = self.builder.build_bitcast(res, 
self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + Operator::F32x4Splat => { + let (v, i) = self.state.pop1_extra()?; + let res = self.splat_vector(v, self.intrinsics.f32x4_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // The spec is unclear, we interpret splat as preserving NaN + // payload bits. + self.state.push1_extra(res, i); + } + Operator::F64x2Splat => { + let (v, i) = self.state.pop1_extra()?; + let res = self.splat_vector(v, self.intrinsics.f64x2_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // The spec is unclear, we interpret splat as preserving NaN + // payload bits. + self.state.push1_extra(res, i); + } + + // Operate on self.locals. + Operator::LocalGet { local_index } => { + let pointer_value = self.locals[local_index as usize]; + let v = self.builder.build_load(pointer_value, ""); + tbaa_label( + &self.module, + self.intrinsics, + format!("local {}", local_index), + v.as_instruction_value().unwrap(), + ); + self.state.push1(v); + } + Operator::LocalSet { local_index } => { + let pointer_value = self.locals[local_index as usize]; + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let store = self.builder.build_store(pointer_value, v); + tbaa_label( + &self.module, + self.intrinsics, + format!("local {}", local_index), + store, + ); + } + Operator::LocalTee { local_index } => { + let pointer_value = self.locals[local_index as usize]; + let (v, i) = self.state.peek1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let store = self.builder.build_store(pointer_value, v); + tbaa_label( + &self.module, + self.intrinsics, + format!("local {}", local_index), + store, + ); + } + + Operator::GlobalGet { global_index } => { + let global_index = GlobalIndex::from_u32(global_index); + match self + .ctx + .global(global_index, self.intrinsics, self.module)? + { + GlobalCache::Const { value } => { + self.state.push1(*value); + } + GlobalCache::Mut { ptr_to_value } => { + let value = self.builder.build_load(*ptr_to_value, ""); + tbaa_label( + self.module, + self.intrinsics, + format!("global {}", global_index.as_u32()), + value.as_instruction_value().unwrap(), + ); + self.state.push1(value); + } + } + } + Operator::GlobalSet { global_index } => { + let global_index = GlobalIndex::from_u32(global_index); + match self + .ctx + .global(global_index, self.intrinsics, self.module)? + { + GlobalCache::Const { value: _ } => { + return Err(CompileError::Codegen(format!( + "global.set on immutable global index {}", + global_index.as_u32() + ))) + } + GlobalCache::Mut { ptr_to_value } => { + let ptr_to_value = *ptr_to_value; + let (value, info) = self.state.pop1_extra()?; + let value = self.apply_pending_canonicalization(value, info); + let store = self.builder.build_store(ptr_to_value, value); + tbaa_label( + self.module, + self.intrinsics, + format!("global {}", global_index.as_u32()), + store, + ); + } + } + } + + // `TypedSelect` must be used for extern refs so ref counting should + // be done with TypedSelect. But otherwise they're the same. + Operator::TypedSelect { .. } | Operator::Select => { + let ((v1, i1), (v2, i2), (cond, _)) = self.state.pop3_extra()?; + // We don't bother canonicalizing 'cond' here because we only + // compare it to zero, and that's invariant under + // canonicalization. + + // If the pending bits of v1 and v2 are the same, we can pass + // them along to the result. Otherwise, apply pending + // canonicalizations now. 
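+                // (Illustrative, not part of this change.) Concretely:
+                //   pending(v1) == pending(v2): keep both values as-is and carry
+                //       the shared pending bit into the result's ExtraInfo;
+                //   pending(v1) != pending(v2): canonicalize both now and strip
+                //       the pending bits;
+                // so the select never mixes a canonicalized operand with a
+                // pending-NaN one.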
+ let (v1, i1, v2, i2) = if i1.has_pending_f32_nan() != i2.has_pending_f32_nan() + || i1.has_pending_f64_nan() != i2.has_pending_f64_nan() + { + ( + self.apply_pending_canonicalization(v1, i1), + i1.strip_pending(), + self.apply_pending_canonicalization(v2, i2), + i2.strip_pending(), + ) + } else { + (v1, i1, v2, i2) + }; + let cond_value = self.builder.build_int_compare( + IntPredicate::NE, + cond.into_int_value(), + self.intrinsics.i32_zero, + "", + ); + let res = self.builder.build_select(cond_value, v1, v2, ""); + let info = { + let mut info = i1.strip_pending() & i2.strip_pending(); + if i1.has_pending_f32_nan() { + debug_assert!(i2.has_pending_f32_nan()); + info |= ExtraInfo::pending_f32_nan(); + } + if i1.has_pending_f64_nan() { + debug_assert!(i2.has_pending_f64_nan()); + info |= ExtraInfo::pending_f64_nan(); + } + info + }; + self.state.push1_extra(res, info); + } + Operator::Call { function_index } => { + let func_index = FunctionIndex::from_u32(function_index); + let sigindex = &self.wasm_module.functions[func_index]; + let func_type = &self.wasm_module.signatures[*sigindex]; + + let FunctionCache { + func, + vmctx: callee_vmctx, + attrs, + } = if let Some(local_func_index) = self.wasm_module.local_func_index(func_index) { + let function_name = self + .symbol_registry + .symbol_to_name(Symbol::LocalFunction(local_func_index)); + self.ctx.local_func( + local_func_index, + func_index, + self.intrinsics, + self.module, + self.context, + func_type, + &function_name, + )? + } else { + self.ctx + .func(func_index, self.intrinsics, self.context, func_type)? + }; + let func = *func; + let callee_vmctx = *callee_vmctx; + let attrs = attrs.clone(); + + /* + let func_ptr = self.llvm.functions.borrow_mut()[&func_index]; + + (params, func_ptr.as_global_value().as_pointer_value()) + */ + let params = self.state.popn_save_extra(func_type.params().len())?; + + // Apply pending canonicalizations. 
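+                // (Illustrative note, not part of this change.) Stack values for
+                // F32/F64 arguments may be carried as raw bits, so below they are
+                // bitcast back to the float type the signature expects; V128
+                // arguments only need any pending NaN canonicalization applied
+                // and stay i128.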
+                let params =
+                    params
+                        .iter()
+                        .zip(func_type.params().iter())
+                        .map(|((v, info), wasm_ty)| match wasm_ty {
+                            Type::F32 => self.builder.build_bitcast(
+                                self.apply_pending_canonicalization(*v, *info),
+                                self.intrinsics.f32_ty,
+                                "",
+                            ),
+                            Type::F64 => self.builder.build_bitcast(
+                                self.apply_pending_canonicalization(*v, *info),
+                                self.intrinsics.f64_ty,
+                                "",
+                            ),
+                            Type::V128 => self.apply_pending_canonicalization(*v, *info),
+                            _ => *v,
+                        });
+
+                let params = self.abi.args_to_call(
+                    &self.alloca_builder,
+                    func_type,
+                    callee_vmctx.into_pointer_value(),
+                    &func.get_type().get_element_type().into_function_type(),
+                    params.collect::<Vec<_>>().as_slice(),
+                );
+
+                /*
+                if self.track_state {
+                    if let Some(offset) = opcode_offset {
+                        let mut stackmaps = self.stackmaps.borrow_mut();
+                        emit_stack_map(
+                            &info,
+                            self.intrinsics,
+                            self.builder,
+                            self.index,
+                            &mut *stackmaps,
+                            StackmapEntryKind::Call,
+                            &self.locals,
+                            state,
+                            ctx,
+                            offset,
+                        )
+                    }
+                }
+                */
+
+                let callable_func = inkwell::values::CallableValue::try_from(func).unwrap();
+                let call_site = self.builder.build_call(
+                    callable_func,
+                    params
+                        .iter()
+                        .copied()
+                        .map(Into::into)
+                        .collect::<Vec<_>>()
+                        .as_slice(),
+                    "",
+                );
+                for (attr, attr_loc) in attrs {
+                    call_site.add_attribute(attr_loc, attr);
+                }
+                /*
+                if self.track_state {
+                    if let Some(offset) = opcode_offset {
+                        let mut stackmaps = self.stackmaps.borrow_mut();
+                        finalize_opcode_stack_map(
+                            self.intrinsics,
+                            self.builder,
+                            self.index,
+                            &mut *stackmaps,
+                            StackmapEntryKind::Call,
+                            offset,
+                        )
+                    }
+                }
+                */
+
+                self.abi
+                    .rets_from_call(&self.builder, &self.intrinsics, call_site, func_type)
+                    .iter()
+                    .for_each(|ret| self.state.push1(*ret));
+            }
+            Operator::CallIndirect { index, table_index } => {
+                let sigindex = SignatureIndex::from_u32(index);
+                let func_type = &self.wasm_module.signatures[sigindex];
+                let expected_dynamic_sigindex =
+                    self.ctx
+                        .dynamic_sigindex(sigindex, self.intrinsics, self.module);
+                let (table_base, table_bound) = self.ctx.table(
+                    TableIndex::from_u32(table_index),
+                    self.intrinsics,
+                    self.module,
+                );
+                let func_index = self.state.pop1()?.into_int_value();
+
+                let truncated_table_bounds = self.builder.build_int_truncate(
+                    table_bound,
+                    self.intrinsics.i32_ty,
+                    "truncated_table_bounds",
+                );
+
+                // First, check if the index is outside of the table bounds.
+                let index_in_bounds = self.builder.build_int_compare(
+                    IntPredicate::ULT,
+                    func_index,
+                    truncated_table_bounds,
+                    "index_in_bounds",
+                );
+
+                let index_in_bounds = self
+                    .builder
+                    .build_call(
+                        self.intrinsics.expect_i1,
+                        &[
+                            index_in_bounds.into(),
+                            self.intrinsics.i1_ty.const_int(1, false).into(),
+                        ],
+                        "index_in_bounds_expect",
+                    )
+                    .try_as_basic_value()
+                    .left()
+                    .unwrap()
+                    .into_int_value();
+
+                let in_bounds_continue_block = self
+                    .context
+                    .append_basic_block(self.function, "in_bounds_continue_block");
+                let not_in_bounds_block = self
+                    .context
+                    .append_basic_block(self.function, "not_in_bounds_block");
+                self.builder.build_conditional_branch(
+                    index_in_bounds,
+                    in_bounds_continue_block,
+                    not_in_bounds_block,
+                );
+                self.builder.position_at_end(not_in_bounds_block);
+                self.builder.build_call(
+                    self.intrinsics.throw_trap,
+                    &[self.intrinsics.trap_table_access_oob.into()],
+                    "throw",
+                );
+                self.builder.build_unreachable();
+                self.builder.position_at_end(in_bounds_continue_block);
+
+                // We assume the table has the `funcref` (pointer to `anyfunc`)
+                // element type.
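+                // (Illustrative, not part of this change.) The `anyfunc` record
+                // dereferenced below has three fields, matching the
+                // `build_struct_gep` indices used to load them:
+                //
+                //     // field names are hypothetical; only the indices matter here
+                //     #[repr(C)]
+                //     struct AnyFunc {
+                //         func_ptr: *const core::ffi::c_void, // gep 0: code pointer
+                //         sigindex: u32,                      // gep 1: signature id
+                //         ctx_ptr: *mut core::ffi::c_void,    // gep 2: callee vmctx
+                //     }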
+                let casted_table_base = self.builder.build_pointer_cast(
+                    table_base,
+                    self.intrinsics.funcref_ty.ptr_type(AddressSpace::Generic),
+                    "casted_table_base",
+                );
+
+                let funcref_ptr = unsafe {
+                    self.builder.build_in_bounds_gep(
+                        casted_table_base,
+                        &[func_index],
+                        "funcref_ptr",
+                    )
+                };
+
+                // a funcref (pointer to `anyfunc`)
+                let anyfunc_struct_ptr = self
+                    .builder
+                    .build_load(funcref_ptr, "anyfunc_struct_ptr")
+                    .into_pointer_value();
+
+                // trap if we're trying to call a null funcref
+                {
+                    let funcref_not_null = self
+                        .builder
+                        .build_is_not_null(anyfunc_struct_ptr, "null funcref check");
+
+                    let funcref_continue_deref_block = self
+                        .context
+                        .append_basic_block(self.function, "funcref_continue_deref_block");
+
+                    let funcref_is_null_block = self
+                        .context
+                        .append_basic_block(self.function, "funcref_is_null_block");
+                    self.builder.build_conditional_branch(
+                        funcref_not_null,
+                        funcref_continue_deref_block,
+                        funcref_is_null_block,
+                    );
+                    self.builder.position_at_end(funcref_is_null_block);
+                    self.builder.build_call(
+                        self.intrinsics.throw_trap,
+                        &[self.intrinsics.trap_call_indirect_null.into()],
+                        "throw",
+                    );
+                    self.builder.build_unreachable();
+                    self.builder.position_at_end(funcref_continue_deref_block);
+                }
+
+                // Load things from the anyfunc data structure.
+                let (func_ptr, found_dynamic_sigindex, ctx_ptr) = (
+                    self.builder
+                        .build_load(
+                            self.builder
+                                .build_struct_gep(anyfunc_struct_ptr, 0, "func_ptr_ptr")
+                                .unwrap(),
+                            "func_ptr",
+                        )
+                        .into_pointer_value(),
+                    self.builder
+                        .build_load(
+                            self.builder
+                                .build_struct_gep(anyfunc_struct_ptr, 1, "sigindex_ptr")
+                                .unwrap(),
+                            "sigindex",
+                        )
+                        .into_int_value(),
+                    self.builder.build_load(
+                        self.builder
+                            .build_struct_gep(anyfunc_struct_ptr, 2, "ctx_ptr_ptr")
+                            .unwrap(),
+                        "ctx_ptr",
+                    ),
+                );
+
+                // Next, check if the table element is initialized.
+
+                // TODO: we may not need this check anymore
+                let elem_initialized = self.builder.build_is_not_null(func_ptr, "");
+
+                // Next, check if the signature id is correct.
+
+                let sigindices_equal = self.builder.build_int_compare(
+                    IntPredicate::EQ,
+                    expected_dynamic_sigindex,
+                    found_dynamic_sigindex,
+                    "sigindices_equal",
+                );
+
+                let initialized_and_sigindices_match =
+                    self.builder
+                        .build_and(elem_initialized, sigindices_equal, "");
+
+                // Tell llvm that `expected_dynamic_sigindex` should equal `found_dynamic_sigindex`.
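+                // (Illustrative note, not part of this change.) `expect_i1` wraps
+                // LLVM's `llvm.expect.i1` intrinsic: it only attaches
+                // branch-probability information and returns its first operand
+                // unchanged, so the trap path below is still taken whenever the
+                // predicate is actually false.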
+                let initialized_and_sigindices_match = self
+                    .builder
+                    .build_call(
+                        self.intrinsics.expect_i1,
+                        &[
+                            initialized_and_sigindices_match.into(),
+                            self.intrinsics.i1_ty.const_int(1, false).into(),
+                        ],
+                        "initialized_and_sigindices_match_expect",
+                    )
+                    .try_as_basic_value()
+                    .left()
+                    .unwrap()
+                    .into_int_value();
+
+                let continue_block = self
+                    .context
+                    .append_basic_block(self.function, "continue_block");
+                let sigindices_notequal_block = self
+                    .context
+                    .append_basic_block(self.function, "sigindices_notequal_block");
+                self.builder.build_conditional_branch(
+                    initialized_and_sigindices_match,
+                    continue_block,
+                    sigindices_notequal_block,
+                );
+
+                self.builder.position_at_end(sigindices_notequal_block);
+                let trap_code = self.builder.build_select(
+                    elem_initialized,
+                    self.intrinsics.trap_call_indirect_sig,
+                    self.intrinsics.trap_call_indirect_null,
+                    "",
+                );
+                self.builder
+                    .build_call(self.intrinsics.throw_trap, &[trap_code.into()], "throw");
+                self.builder.build_unreachable();
+                self.builder.position_at_end(continue_block);
+
+                let (llvm_func_type, llvm_func_attrs) = self.abi.func_type_to_llvm(
+                    &self.context,
+                    &self.intrinsics,
+                    Some(self.ctx.get_offsets()),
+                    func_type,
+                )?;
+
+                let params = self.state.popn_save_extra(func_type.params().len())?;
+
+                // Apply pending canonicalizations.
+                let params =
+                    params
+                        .iter()
+                        .zip(func_type.params().iter())
+                        .map(|((v, info), wasm_ty)| match wasm_ty {
+                            Type::F32 => self.builder.build_bitcast(
+                                self.apply_pending_canonicalization(*v, *info),
+                                self.intrinsics.f32_ty,
+                                "",
+                            ),
+                            Type::F64 => self.builder.build_bitcast(
+                                self.apply_pending_canonicalization(*v, *info),
+                                self.intrinsics.f64_ty,
+                                "",
+                            ),
+                            Type::V128 => self.apply_pending_canonicalization(*v, *info),
+                            _ => *v,
+                        });
+
+                let params = self.abi.args_to_call(
+                    &self.alloca_builder,
+                    func_type,
+                    ctx_ptr.into_pointer_value(),
+                    &llvm_func_type,
+                    params.collect::<Vec<_>>().as_slice(),
+                );
+
+                let typed_func_ptr = self.builder.build_pointer_cast(
+                    func_ptr,
+                    llvm_func_type.ptr_type(AddressSpace::Generic),
+                    "typed_func_ptr",
+                );
+
+                /*
+                if self.track_state {
+                    if let Some(offset) = opcode_offset {
+                        let mut stackmaps = self.stackmaps.borrow_mut();
+                        emit_stack_map(
+                            &info,
+                            self.intrinsics,
+                            self.builder,
+                            self.index,
+                            &mut *stackmaps,
+                            StackmapEntryKind::Call,
+                            &self.locals,
+                            state,
+                            ctx,
+                            offset,
+                        )
+                    }
+                }
+                */
+                let callable_func =
+                    inkwell::values::CallableValue::try_from(typed_func_ptr).unwrap();
+                let call_site = self.builder.build_call(
+                    callable_func,
+                    params
+                        .iter()
+                        .copied()
+                        .map(Into::into)
+                        .collect::<Vec<_>>()
+                        .as_slice(),
+                    "indirect_call",
+                );
+                for (attr, attr_loc) in llvm_func_attrs {
+                    call_site.add_attribute(attr_loc, attr);
+                }
+                /*
+                if self.track_state {
+                    if let Some(offset) = opcode_offset {
+                        let mut stackmaps = self.stackmaps.borrow_mut();
+                        finalize_opcode_stack_map(
+                            self.intrinsics,
+                            self.builder,
+                            self.index,
+                            &mut *stackmaps,
+                            StackmapEntryKind::Call,
+                            offset,
+                        )
+                    }
+                }
+                */
+
+                self.abi
+                    .rets_from_call(&self.builder, &self.intrinsics, call_site, func_type)
+                    .iter()
+                    .for_each(|ret| self.state.push1(*ret));
+            }
+
+            /***************************
+             * Integer Arithmetic instructions.
+ * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#integer-arithmetic-instructions + ***************************/ + Operator::I32Add | Operator::I64Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let res = self.builder.build_int_add(v1, v2, ""); + self.state.push1(res); + } + Operator::I8x16Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self.builder.build_int_add(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self.builder.build_int_add(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ExtAddPairwiseI8x16S | Operator::I16x8ExtAddPairwiseI8x16U => { + let extend_op = match op { + Operator::I16x8ExtAddPairwiseI8x16S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i16x8_ty, "") + } + Operator::I16x8ExtAddPairwiseI8x16U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i16x8_ty, "") + } + _ => unreachable!("Unhandled internal variant"), + }; + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + + let left = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[8], + self.intrinsics.i32_consts[10], + self.intrinsics.i32_consts[12], + self.intrinsics.i32_consts[14], + ]), + "", + ); + let left = extend_op(&self, left); + let right = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[7], + self.intrinsics.i32_consts[9], + self.intrinsics.i32_consts[11], + self.intrinsics.i32_consts[13], + self.intrinsics.i32_consts[15], + ]), + "", + ); + let right = extend_op(&self, right); + + let res = self.builder.build_int_add(left, right, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self.builder.build_int_add(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ExtAddPairwiseI16x8S | Operator::I32x4ExtAddPairwiseI16x8U => { + let extend_op = match op { + Operator::I32x4ExtAddPairwiseI16x8S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i32x4_ty, "") + } + Operator::I32x4ExtAddPairwiseI16x8U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i32x4_ty, "") + } + _ => unreachable!("Unhandled internal variant"), + }; + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + + let left = self.builder.build_shuffle_vector( + 
v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[6], + ]), + "", + ); + let left = extend_op(&self, left); + let right = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let right = extend_op(&self, right); + + let res = self.builder.build_int_add(left, right, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self.builder.build_int_add(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16AddSatS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.sadd_sat_i8x16, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8AddSatS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.sadd_sat_i16x8, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16AddSatU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.uadd_sat_i8x16, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8AddSatU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.uadd_sat_i16x8, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32Sub | Operator::I64Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let res = self.builder.build_int_sub(v1, v2, ""); + self.state.push1(res); + } + Operator::I8x16Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self.builder.build_int_sub(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = 
self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self.builder.build_int_sub(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self.builder.build_int_sub(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self.builder.build_int_sub(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16SubSatS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.ssub_sat_i8x16, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8SubSatS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.ssub_sat_i16x8, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16SubSatU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.usub_sat_i8x16, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8SubSatU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_call(self.intrinsics.usub_sat_i16x8, &[v1.into(), v2.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32Mul | Operator::I64Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let res = self.builder.build_int_mul(v1, v2, ""); + self.state.push1(res); + } + Operator::I16x8Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self.builder.build_int_mul(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self.builder.build_int_mul(v1, v2, ""); + let res = 
self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self.builder.build_int_mul(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Q15MulrSatS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + + let max_value = self + .intrinsics + .i16_ty + .const_int(i16::max_value() as u64, false); + let max_values = VectorType::const_vector(&[max_value; 8]); + + let v1 = self + .builder + .build_int_s_extend(v1, self.intrinsics.i32x8_ty, ""); + let v2 = self + .builder + .build_int_s_extend(v2, self.intrinsics.i32x8_ty, ""); + let res = self.builder.build_int_mul(v1, v2, ""); + + // magic number specified by the spec + let bit = self.intrinsics.i32_ty.const_int(0x4000, false); + let bits = VectorType::const_vector(&[bit; 8]); + + let res = self.builder.build_int_add(res, bits, ""); + + let fifteen = self.intrinsics.i32_consts[15]; + let fifteens = VectorType::const_vector(&[fifteen; 8]); + + let res = self.builder.build_right_shift(res, fifteens, true, ""); + let saturate_up = { + let max_values = + self.builder + .build_int_s_extend(max_values, self.intrinsics.i32x8_ty, ""); + let saturate_up = + self.builder + .build_int_compare(IntPredicate::SGT, res, max_values, ""); + saturate_up + }; + + let res = self + .builder + .build_int_truncate(res, self.intrinsics.i16x8_ty, ""); + + let res = self + .builder + .build_select(saturate_up, max_values, res, "") + .into_vector_value(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ExtMulLowI8x16S + | Operator::I16x8ExtMulLowI8x16U + | Operator::I16x8ExtMulHighI8x16S + | Operator::I16x8ExtMulHighI8x16U => { + let extend_op = match op { + Operator::I16x8ExtMulLowI8x16S | Operator::I16x8ExtMulHighI8x16S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i16x8_ty, "") + } + Operator::I16x8ExtMulLowI8x16U | Operator::I16x8ExtMulHighI8x16U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i16x8_ty, "") + } + _ => unreachable!("Unhandled internal variant"), + }; + let shuffle_array = match op { + Operator::I16x8ExtMulLowI8x16S | Operator::I16x8ExtMulLowI8x16U => [ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[8], + self.intrinsics.i32_consts[10], + self.intrinsics.i32_consts[12], + self.intrinsics.i32_consts[14], + ], + Operator::I16x8ExtMulHighI8x16S | Operator::I16x8ExtMulHighI8x16U => [ + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[7], + self.intrinsics.i32_consts[9], + self.intrinsics.i32_consts[11], + self.intrinsics.i32_consts[13], + self.intrinsics.i32_consts[15], + ], + _ => unreachable!("Unhandled internal variant"), + }; + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let val1 = self.builder.build_shuffle_vector( + v1, + v1.get_type().get_undef(), + VectorType::const_vector(&shuffle_array), + "", + ); + let val1 = extend_op(&self, val1); + let val2 = 
self.builder.build_shuffle_vector( + v2, + v2.get_type().get_undef(), + VectorType::const_vector(&shuffle_array), + "", + ); + let val2 = extend_op(&self, val2); + let res = self.builder.build_int_mul(val1, val2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ExtMulLowI16x8S + | Operator::I32x4ExtMulLowI16x8U + | Operator::I32x4ExtMulHighI16x8S + | Operator::I32x4ExtMulHighI16x8U => { + let extend_op = match op { + Operator::I32x4ExtMulLowI16x8S | Operator::I32x4ExtMulHighI16x8S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i32x4_ty, "") + } + Operator::I32x4ExtMulLowI16x8U | Operator::I32x4ExtMulHighI16x8U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i32x4_ty, "") + } + _ => unreachable!("Unhandled internal variant"), + }; + let shuffle_array = match op { + Operator::I32x4ExtMulLowI16x8S | Operator::I32x4ExtMulLowI16x8U => [ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[6], + ], + Operator::I32x4ExtMulHighI16x8S | Operator::I32x4ExtMulHighI16x8U => [ + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[7], + ], + _ => unreachable!("Unhandled internal variant"), + }; + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let val1 = self.builder.build_shuffle_vector( + v1, + v1.get_type().get_undef(), + VectorType::const_vector(&shuffle_array), + "", + ); + let val1 = extend_op(&self, val1); + let val2 = self.builder.build_shuffle_vector( + v2, + v2.get_type().get_undef(), + VectorType::const_vector(&shuffle_array), + "", + ); + let val2 = extend_op(&self, val2); + let res = self.builder.build_int_mul(val1, val2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2ExtMulLowI32x4S + | Operator::I64x2ExtMulLowI32x4U + | Operator::I64x2ExtMulHighI32x4S + | Operator::I64x2ExtMulHighI32x4U => { + let extend_op = match op { + Operator::I64x2ExtMulLowI32x4S | Operator::I64x2ExtMulHighI32x4S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i64x2_ty, "") + } + Operator::I64x2ExtMulLowI32x4U | Operator::I64x2ExtMulHighI32x4U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i64x2_ty, "") + } + _ => unreachable!("Unhandled internal variant"), + }; + let shuffle_array = match op { + Operator::I64x2ExtMulLowI32x4S | Operator::I64x2ExtMulLowI32x4U => { + [self.intrinsics.i32_consts[0], self.intrinsics.i32_consts[2]] + } + Operator::I64x2ExtMulHighI32x4S | Operator::I64x2ExtMulHighI32x4U => { + [self.intrinsics.i32_consts[1], self.intrinsics.i32_consts[3]] + } + _ => unreachable!("Unhandled internal variant"), + }; + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let val1 = self.builder.build_shuffle_vector( + v1, + v1.get_type().get_undef(), + VectorType::const_vector(&shuffle_array), + "", + ); + let val1 = extend_op(&self, val1); + let val2 = self.builder.build_shuffle_vector( + v2, + v2.get_type().get_undef(), + VectorType::const_vector(&shuffle_array), + "", + ); + let val2 = extend_op(&self, val2); + let res = self.builder.build_int_mul(val1, val2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + 
self.state.push1(res); + } + Operator::I32x4DotI16x8S => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let low_i16 = [ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[6], + ]; + let high_i16 = [ + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[7], + ]; + let v1_low = self.builder.build_shuffle_vector( + v1, + v1.get_type().get_undef(), + VectorType::const_vector(&low_i16), + "", + ); + let v1_low = self + .builder + .build_int_s_extend(v1_low, self.intrinsics.i32x4_ty, ""); + let v1_high = self.builder.build_shuffle_vector( + v1, + v1.get_type().get_undef(), + VectorType::const_vector(&high_i16), + "", + ); + let v1_high = + self.builder + .build_int_s_extend(v1_high, self.intrinsics.i32x4_ty, ""); + let v2_low = self.builder.build_shuffle_vector( + v2, + v2.get_type().get_undef(), + VectorType::const_vector(&low_i16), + "", + ); + let v2_low = self + .builder + .build_int_s_extend(v2_low, self.intrinsics.i32x4_ty, ""); + let v2_high = self.builder.build_shuffle_vector( + v2, + v2.get_type().get_undef(), + VectorType::const_vector(&high_i16), + "", + ); + let v2_high = + self.builder + .build_int_s_extend(v2_high, self.intrinsics.i32x4_ty, ""); + let low_product = self.builder.build_int_mul(v1_low, v2_low, ""); + let high_product = self.builder.build_int_mul(v1_high, v2_high, ""); + + let res = self.builder.build_int_add(low_product, high_product, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32DivS | Operator::I64DivS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + + self.trap_if_zero_or_overflow(v1, v2); + + let res = self.builder.build_int_signed_div(v1, v2, ""); + self.state.push1(res); + } + Operator::I32DivU | Operator::I64DivU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + + self.trap_if_zero(v2); + + let res = self.builder.build_int_unsigned_div(v1, v2, ""); + self.state.push1(res); + } + Operator::I32RemS | Operator::I64RemS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let int_type = v1.get_type(); + let (min_value, neg_one_value) = if int_type == self.intrinsics.i32_ty { + let min_value = int_type.const_int(i32::min_value() as u64, false); + let neg_one_value = int_type.const_int(-1i32 as u32 as u64, false); + (min_value, neg_one_value) + } else if int_type == self.intrinsics.i64_ty { + let min_value = int_type.const_int(i64::min_value() as u64, false); + let neg_one_value = int_type.const_int(-1i64 as u64, false); + (min_value, neg_one_value) + } else { + unreachable!() + }; + + self.trap_if_zero(v2); + + // "Overflow also leads to undefined behavior; this is a rare + // case, but can occur, for example, by taking the remainder of + // a 32-bit division of -2147483648 by -1. 
(The remainder + // doesn’t actually overflow, but this rule lets srem be + // implemented using instructions that return both the result + // of the division and the remainder.)" + // -- https://llvm.org/docs/LangRef.html#srem-instruction + // + // In Wasm, the i32.rem_s i32.const -2147483648 i32.const -1 is + // i32.const 0. We implement this by swapping out the left value + // for 0 in this case. + let will_overflow = self.builder.build_and( + self.builder + .build_int_compare(IntPredicate::EQ, v1, min_value, "left_is_min"), + self.builder.build_int_compare( + IntPredicate::EQ, + v2, + neg_one_value, + "right_is_neg_one", + ), + "srem_will_overflow", + ); + let v1 = self + .builder + .build_select(will_overflow, int_type.const_zero(), v1, "") + .into_int_value(); + let res = self.builder.build_int_signed_rem(v1, v2, ""); + self.state.push1(res); + } + Operator::I32RemU | Operator::I64RemU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + + self.trap_if_zero(v2); + + let res = self.builder.build_int_unsigned_rem(v1, v2, ""); + self.state.push1(res); + } + Operator::I32And | Operator::I64And | Operator::V128And => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let res = self.builder.build_and(v1, v2, ""); + self.state.push1(res); + } + Operator::I32Or | Operator::I64Or | Operator::V128Or => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let res = self.builder.build_or(v1, v2, ""); + self.state.push1(res); + } + Operator::I32Xor | Operator::I64Xor | Operator::V128Xor => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let res = self.builder.build_xor(v1, v2, ""); + self.state.push1(res); + } + Operator::V128AndNot => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let v2 = self.builder.build_not(v2, ""); + let res = self.builder.build_and(v1, v2, ""); + self.state.push1(res); + } + Operator::V128Bitselect => { + let ((v1, i1), (v2, i2), (cond, cond_info)) = self.state.pop3_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let cond = self.apply_pending_canonicalization(cond, cond_info); + let v1 = self + .builder + .build_bitcast(v1, self.intrinsics.i1x128_ty, "") + .into_vector_value(); + let v2 = self + .builder + .build_bitcast(v2, self.intrinsics.i1x128_ty, "") + .into_vector_value(); + let cond = self + .builder + .build_bitcast(cond, self.intrinsics.i1x128_ty, "") + .into_vector_value(); + let res = self.builder.build_select(cond, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16Bitmask => { + let (v, i) = 
self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + + let zeros = self.intrinsics.i8x16_ty.const_zero(); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v, zeros, ""); + let res = self + .builder + .build_bitcast(res, self.intrinsics.i16_ty, "") + .into_int_value(); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I16x8Bitmask => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + + let zeros = self.intrinsics.i16x8_ty.const_zero(); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v, zeros, ""); + let res = self + .builder + .build_bitcast(res, self.intrinsics.i8_ty, "") + .into_int_value(); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I32x4Bitmask => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i32x4(v, i); + + let zeros = self.intrinsics.i32x4_ty.const_zero(); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v, zeros, ""); + let res = self + .builder + .build_bitcast(res, self.intrinsics.i4_ty, "") + .into_int_value(); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I64x2Bitmask => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i64x2(v, i); + + let zeros = self.intrinsics.i64x2_ty.const_zero(); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v, zeros, ""); + let res = self + .builder + .build_bitcast(res, self.intrinsics.i2_ty, "") + .into_int_value(); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I32Shl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i32_ty.const_int(31u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let res = self.builder.build_left_shift(v1, v2, ""); + self.state.push1(res); + } + Operator::I64Shl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i64_ty.const_int(63u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let res = self.builder.build_left_shift(v1, v2, ""); + self.state.push1(res); + } + Operator::I8x16Shl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = self + .builder + .build_and(v2, self.intrinsics.i32_consts[7], ""); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i8_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i8x16_ty); + let res = self.builder.build_left_shift(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Shl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = self + 
.builder + .build_and(v2, self.intrinsics.i32_consts[15], ""); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i16_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i16x8_ty); + let res = self.builder.build_left_shift(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Shl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = + self.builder + .build_and(v2, self.intrinsics.i32_ty.const_int(31, false), ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i32x4_ty); + let res = self.builder.build_left_shift(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Shl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = + self.builder + .build_and(v2, self.intrinsics.i32_ty.const_int(63, false), ""); + let v2 = self + .builder + .build_int_z_extend(v2, self.intrinsics.i64_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i64x2_ty); + let res = self.builder.build_left_shift(v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32ShrS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i32_ty.const_int(31u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let res = self.builder.build_right_shift(v1, v2, true, ""); + self.state.push1(res); + } + Operator::I64ShrS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i64_ty.const_int(63u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let res = self.builder.build_right_shift(v1, v2, true, ""); + self.state.push1(res); + } + Operator::I8x16ShrS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = self + .builder + .build_and(v2, self.intrinsics.i32_consts[7], ""); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i8_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i8x16_ty); + let res = self.builder.build_right_shift(v1, v2, true, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ShrS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = self + .builder + .build_and(v2, self.intrinsics.i32_consts[15], ""); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i16_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), 
self.intrinsics.i16x8_ty); + let res = self.builder.build_right_shift(v1, v2, true, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ShrS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = + self.builder + .build_and(v2, self.intrinsics.i32_ty.const_int(31, false), ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i32x4_ty); + let res = self.builder.build_right_shift(v1, v2, true, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2ShrS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = + self.builder + .build_and(v2, self.intrinsics.i32_ty.const_int(63, false), ""); + let v2 = self + .builder + .build_int_z_extend(v2, self.intrinsics.i64_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i64x2_ty); + let res = self.builder.build_right_shift(v1, v2, true, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32ShrU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i32_ty.const_int(31u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let res = self.builder.build_right_shift(v1, v2, false, ""); + self.state.push1(res); + } + Operator::I64ShrU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i64_ty.const_int(63u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let res = self.builder.build_right_shift(v1, v2, false, ""); + self.state.push1(res); + } + Operator::I8x16ShrU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = self + .builder + .build_and(v2, self.intrinsics.i32_consts[7], ""); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i8_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i8x16_ty); + let res = self.builder.build_right_shift(v1, v2, false, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ShrU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = self + .builder + .build_and(v2, self.intrinsics.i32_consts[15], ""); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i16_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i16x8_ty); + let res = self.builder.build_right_shift(v1, v2, false, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + 
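+ // All of the scalar and vector shift variants here mask the
+ // shift count to (lane width - 1) before shifting: Wasm defines
+ // the count modulo the bit width, while an LLVM shift by >= the
+ // width is poison. A rough scalar sketch of the semantics
+ // (illustrative only, not part of the build):
+ //
+ //   fn wasm_shl_i32(v: i32, s: i32) -> i32 {
+ //       v << (s & 31) // a count of 32 wraps to 0, not poison
+ //   }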
Operator::I32x4ShrU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = + self.builder + .build_and(v2, self.intrinsics.i32_ty.const_int(31, false), ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i32x4_ty); + let res = self.builder.build_right_shift(v1, v2, false, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2ShrU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let v2 = + self.builder + .build_and(v2, self.intrinsics.i32_ty.const_int(63, false), ""); + let v2 = self + .builder + .build_int_z_extend(v2, self.intrinsics.i64_ty, ""); + let v2 = self.splat_vector(v2.as_basic_value_enum(), self.intrinsics.i64x2_ty); + let res = self.builder.build_right_shift(v1, v2, false, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32Rotl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i32_ty.const_int(31u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let lhs = self.builder.build_left_shift(v1, v2, ""); + let rhs = { + let negv2 = self.builder.build_int_neg(v2, ""); + let rhs = self.builder.build_and(negv2, mask, ""); + self.builder.build_right_shift(v1, rhs, false, "") + }; + let res = self.builder.build_or(lhs, rhs, ""); + self.state.push1(res); + } + Operator::I64Rotl => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i64_ty.const_int(63u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let lhs = self.builder.build_left_shift(v1, v2, ""); + let rhs = { + let negv2 = self.builder.build_int_neg(v2, ""); + let rhs = self.builder.build_and(negv2, mask, ""); + self.builder.build_right_shift(v1, rhs, false, "") + }; + let res = self.builder.build_or(lhs, rhs, ""); + self.state.push1(res); + } + Operator::I32Rotr => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i32_ty.const_int(31u64, false); + let v2 = self.builder.build_and(v2, mask, ""); + let lhs = self.builder.build_right_shift(v1, v2, false, ""); + let rhs = { + let negv2 = self.builder.build_int_neg(v2, ""); + let rhs = self.builder.build_and(negv2, mask, ""); + self.builder.build_left_shift(v1, rhs, "") + }; + let res = self.builder.build_or(lhs, rhs, ""); + self.state.push1(res); + } + Operator::I64Rotr => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let mask = self.intrinsics.i64_ty.const_int(63u64, false); + let v2 = 
self.builder.build_and(v2, mask, ""); + let lhs = self.builder.build_right_shift(v1, v2, false, ""); + let rhs = { + let negv2 = self.builder.build_int_neg(v2, ""); + let rhs = self.builder.build_and(negv2, mask, ""); + self.builder.build_left_shift(v1, rhs, "") + }; + let res = self.builder.build_or(lhs, rhs, ""); + self.state.push1(res); + } + Operator::I32Clz => { + let (input, info) = self.state.pop1_extra()?; + let input = self.apply_pending_canonicalization(input, info); + let is_zero_undef = self.intrinsics.i1_zero; + let res = self + .builder + .build_call( + self.intrinsics.ctlz_i32, + &[input.into(), is_zero_undef.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::arithmetic_f32()); + } + Operator::I64Clz => { + let (input, info) = self.state.pop1_extra()?; + let input = self.apply_pending_canonicalization(input, info); + let is_zero_undef = self.intrinsics.i1_zero; + let res = self + .builder + .build_call( + self.intrinsics.ctlz_i64, + &[input.into(), is_zero_undef.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::arithmetic_f64()); + } + Operator::I32Ctz => { + let (input, info) = self.state.pop1_extra()?; + let input = self.apply_pending_canonicalization(input, info); + let is_zero_undef = self.intrinsics.i1_zero; + let res = self + .builder + .build_call( + self.intrinsics.cttz_i32, + &[input.into(), is_zero_undef.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::arithmetic_f32()); + } + Operator::I64Ctz => { + let (input, info) = self.state.pop1_extra()?; + let input = self.apply_pending_canonicalization(input, info); + let is_zero_undef = self.intrinsics.i1_zero; + let res = self + .builder + .build_call( + self.intrinsics.cttz_i64, + &[input.into(), is_zero_undef.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::arithmetic_f64()); + } + Operator::I8x16Popcnt => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let res = self + .builder + .build_call(self.intrinsics.ctpop_i8x16, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32Popcnt => { + let (input, info) = self.state.pop1_extra()?; + let input = self.apply_pending_canonicalization(input, info); + let res = self + .builder + .build_call(self.intrinsics.ctpop_i32, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::arithmetic_f32()); + } + Operator::I64Popcnt => { + let (input, info) = self.state.pop1_extra()?; + let input = self.apply_pending_canonicalization(input, info); + let res = self + .builder + .build_call(self.intrinsics.ctpop_i64, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::arithmetic_f64()); + } + Operator::I32Eqz => { + let input = self.state.pop1()?.into_int_value(); + let cond = self.builder.build_int_compare( + IntPredicate::EQ, + input, + self.intrinsics.i32_zero, + "", + ); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra(res, ExtraInfo::arithmetic_f32()); + } + Operator::I64Eqz => { + let input = self.state.pop1()?.into_int_value(); + let cond = self.builder.build_int_compare( + IntPredicate::EQ, + input, + self.intrinsics.i64_zero, 
+ "", + ); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra(res, ExtraInfo::arithmetic_f64()); + } + Operator::I8x16Abs => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + + let seven = self.intrinsics.i8_ty.const_int(7, false); + let seven = VectorType::const_vector(&[seven; 16]); + let all_sign_bits = self.builder.build_right_shift(v, seven, true, ""); + let xor = self.builder.build_xor(v, all_sign_bits, ""); + let res = self.builder.build_int_sub(xor, all_sign_bits, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Abs => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + + let fifteen = self.intrinsics.i16_ty.const_int(15, false); + let fifteen = VectorType::const_vector(&[fifteen; 8]); + let all_sign_bits = self.builder.build_right_shift(v, fifteen, true, ""); + let xor = self.builder.build_xor(v, all_sign_bits, ""); + let res = self.builder.build_int_sub(xor, all_sign_bits, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Abs => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i32x4(v, i); + + let thirtyone = self.intrinsics.i32_ty.const_int(31, false); + let thirtyone = VectorType::const_vector(&[thirtyone; 4]); + let all_sign_bits = self.builder.build_right_shift(v, thirtyone, true, ""); + let xor = self.builder.build_xor(v, all_sign_bits, ""); + let res = self.builder.build_int_sub(xor, all_sign_bits, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Abs => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i64x2(v, i); + + let sixtythree = self.intrinsics.i64_ty.const_int(63, false); + let sixtythree = VectorType::const_vector(&[sixtythree; 2]); + let all_sign_bits = self.builder.build_right_shift(v, sixtythree, true, ""); + let xor = self.builder.build_xor(v, all_sign_bits, ""); + let res = self.builder.build_int_sub(xor, all_sign_bits, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16MinS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16MinU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16MaxS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + 
Operator::I8x16MaxU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::UGT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8MinS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8MinU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8MaxS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8MaxU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::UGT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4MinS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4MinU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4MaxS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4MaxU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let cmp = self + .builder + .build_int_compare(IntPredicate::UGT, 
v1, v2, ""); + let res = self.builder.build_select(cmp, v1, v2, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16RoundingAverageU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + + // This approach is faster on x86-64 when the PAVG[BW] + // instructions are available. On other platforms, an alternative + // implementation appears likely to outperform, described here: + // %a = or %v1, %v2 + // %b = and %a, 1 + // %v1 = lshr %v1, 1 + // %v2 = lshr %v2, 1 + // %sum = add %v1, %v2 + // %res = add %sum, %b + + let ext_ty = self.intrinsics.i16_ty.vec_type(16); + let one = self.intrinsics.i16_ty.const_int(1, false); + let one = VectorType::const_vector(&[one; 16]); + + let v1 = self.builder.build_int_z_extend(v1, ext_ty, ""); + let v2 = self.builder.build_int_z_extend(v2, ext_ty, ""); + let res = + self.builder + .build_int_add(self.builder.build_int_add(one, v1, ""), v2, ""); + let res = self.builder.build_right_shift(res, one, false, ""); + let res = self + .builder + .build_int_truncate(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8RoundingAverageU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + + // This approach is faster on x86-64 when the PAVG[BW] + // instructions are available. On other platforms, an alternative + // implementation appears likely to outperform, described here: + // %a = or %v1, %v2 + // %b = and %a, 1 + // %v1 = lshr %v1, 1 + // %v2 = lshr %v2, 1 + // %sum = add %v1, %v2 + // %res = add %sum, %b + + let ext_ty = self.intrinsics.i32_ty.vec_type(8); + let one = self.intrinsics.i32_consts[1]; + let one = VectorType::const_vector(&[one; 8]); + + let v1 = self.builder.build_int_z_extend(v1, ext_ty, ""); + let v2 = self.builder.build_int_z_extend(v2, ext_ty, ""); + let res = + self.builder + .build_int_add(self.builder.build_int_add(one, v1, ""), v2, ""); + let res = self.builder.build_right_shift(res, one, false, ""); + let res = self + .builder + .build_int_truncate(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + + /*************************** + * Floating-Point Arithmetic instructions. 
+ * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#floating-point-arithmetic-instructions + ***************************/ + Operator::F32Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.add_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f32_nan(), + ); + } + Operator::F64Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.add_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f64_nan(), + ); + } + Operator::F32x4Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f32x4(v1, i1); + let (v2, i2) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.add_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f32_nan(), + ); + } + Operator::F64x2Add => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f64x2(v1, i1); + let (v2, i2) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.add_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f64_nan(), + ); + } + Operator::F32Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.sub_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f32_nan(), + ); + } + Operator::F64Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.sub_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f64_nan(), + ); + } + Operator::F32x4Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f32x4(v1, i1); + let (v2, i2) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.sub_f32x4, + &[ + v1.into(), + v2.into(), + 
self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f32_nan(), + ); + } + Operator::F64x2Sub => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f64x2(v1, i1); + let (v2, i2) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.sub_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f64_nan(), + ); + } + Operator::F32Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.mul_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f32_nan(), + ); + } + Operator::F64Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.mul_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f64_nan(), + ); + } + Operator::F32x4Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f32x4(v1, i1); + let (v2, i2) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.mul_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f32_nan(), + ); + } + Operator::F64x2Mul => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f64x2(v1, i1); + let (v2, i2) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.mul_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra( + res, + (i1.strip_pending() & i2.strip_pending()) | ExtraInfo::pending_f64_nan(), + ); + } + Operator::F32Div => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.div_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::pending_f32_nan()); + } + Operator::F64Div 
=> { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let res = self + .builder + .build_call( + self.intrinsics.div_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::pending_f64_nan()); + } + Operator::F32x4Div => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.div_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, ExtraInfo::pending_f32_nan()); + } + Operator::F64x2Div => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_call( + self.intrinsics.div_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, ExtraInfo::pending_f64_nan()); + } + Operator::F32Sqrt => { + let input = self.state.pop1()?; + let res = self + .builder + .build_call(self.intrinsics.sqrt_f32, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::pending_f32_nan()); + } + Operator::F64Sqrt => { + let input = self.state.pop1()?; + let res = self + .builder + .build_call(self.intrinsics.sqrt_f64, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::pending_f64_nan()); + } + Operator::F32x4Sqrt => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f32x4(v, i); + let res = self + .builder + .build_call(self.intrinsics.sqrt_f32x4, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let bits = self + .builder + .build_bitcast(res, self.intrinsics.i128_ty, "bits"); + self.state.push1_extra(bits, ExtraInfo::pending_f32_nan()); + } + Operator::F64x2Sqrt => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f64x2(v, i); + let res = self + .builder + .build_call(self.intrinsics.sqrt_f64x2, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let bits = self + .builder + .build_bitcast(res, self.intrinsics.i128_ty, "bits"); + self.state.push1(bits); + } + Operator::F32Min => { + // This implements the same logic as LLVM's @llvm.minimum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
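+ //
+ // Wasm f32.min must also propagate NaN and order -0.0 below
+ // +0.0, which is why the lowering below is a chain of selects
+ // rather than a single fcmp/select. A rough scalar sketch of
+ // the required semantics (illustrative only, not part of the
+ // build):
+ //
+ //   fn wasm_fmin(a: f32, b: f32) -> f32 {
+ //       if a.is_nan() || b.is_nan() {
+ //           f32::NAN // some quiet NaN
+ //       } else if a < b {
+ //           a
+ //       } else if a > b {
+ //           b
+ //       } else {
+ //           // a == b: OR the sign bits so min(+0.0, -0.0) == -0.0
+ //           f32::from_bits(a.to_bits() | b.to_bits())
+ //       }
+ //   }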
+ let (v1, v2) = self.state.pop2()?; + let v1 = self.canonicalize_nans(v1); + let v2 = self.canonicalize_nans(v2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v1.into(), + self.intrinsics.f32_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v2.into(), + self.intrinsics.f32_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1), + self.builder.build_select( + v2_is_nan, + self.quiet_nan(v2), + self.builder.build_select( + v1_lt_v2, + v1, + self.builder.build_select( + v1_gt_v2, + v2, + self.builder.build_bitcast( + self.builder.build_or( + self.builder + .build_bitcast(v1, self.intrinsics.i32_ty, "") + .into_int_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i32_ty, "") + .into_int_value(), + "", + ), + self.intrinsics.f32_ty, + "", + ), + "", + ), + "", + ), + "", + ), + "", + ); + + self.state.push1(res); + } + Operator::F64Min => { + // This implements the same logic as LLVM's @llvm.minimum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
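+ //
+ // As in F32Min above. Note that `v1_is_nan` and `v2_is_nan` are
+ // computed by comparing against 0.0 with the unordered (UNO)
+ // predicate: an unordered compare is true exactly when an
+ // operand is NaN, and 0.0 never is, so no dedicated isnan
+ // operation is needed.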
+ let (v1, v2) = self.state.pop2()?; + let v1 = self.canonicalize_nans(v1); + let v2 = self.canonicalize_nans(v2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v1.into(), + self.intrinsics.f64_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v2.into(), + self.intrinsics.f64_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1), + self.builder.build_select( + v2_is_nan, + self.quiet_nan(v2), + self.builder.build_select( + v1_lt_v2, + v1, + self.builder.build_select( + v1_gt_v2, + v2, + self.builder.build_bitcast( + self.builder.build_or( + self.builder + .build_bitcast(v1, self.intrinsics.i64_ty, "") + .into_int_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i64_ty, "") + .into_int_value(), + "", + ), + self.intrinsics.f64_ty, + "", + ), + "", + ), + "", + ), + "", + ), + "", + ); + + self.state.push1(res); + } + Operator::F32x4Min => { + // This implements the same logic as LLVM's @llvm.minimum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
+ let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v1.into(), + self.intrinsics.f32x4_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v2.into(), + self.intrinsics.f32x4_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1.into()).into_vector_value(), + self.builder + .build_select( + v2_is_nan, + self.quiet_nan(v2.into()).into_vector_value(), + self.builder + .build_select( + v1_lt_v2, + v1.into(), + self.builder.build_select( + v1_gt_v2, + v2.into(), + self.builder.build_bitcast( + self.builder.build_or( + self.builder + .build_bitcast(v1, self.intrinsics.i32x4_ty, "") + .into_vector_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i32x4_ty, "") + .into_vector_value(), + "", + ), + self.intrinsics.f32x4_ty, + "", + ), + "", + ), + "", + ) + .into_vector_value(), + "", + ) + .into_vector_value(), + "", + ); + + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32x4PMin => { + // Pseudo-min: b < a ? b : a + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _i1) = self.v128_into_f32x4(v1, i1); + let (v2, _i2) = self.v128_into_f32x4(v2, i2); + let cmp = self + .builder + .build_float_compare(FloatPredicate::OLT, v2, v1, ""); + let res = self.builder.build_select(cmp, v2, v1, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Min => { + // This implements the same logic as LLVM's @llvm.minimum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
+ let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v1.into(), + self.intrinsics.f64x2_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v2.into(), + self.intrinsics.f64x2_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1.into()).into_vector_value(), + self.builder + .build_select( + v2_is_nan, + self.quiet_nan(v2.into()).into_vector_value(), + self.builder + .build_select( + v1_lt_v2, + v1.into(), + self.builder.build_select( + v1_gt_v2, + v2.into(), + self.builder.build_bitcast( + self.builder.build_or( + self.builder + .build_bitcast(v1, self.intrinsics.i64x2_ty, "") + .into_vector_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i64x2_ty, "") + .into_vector_value(), + "", + ), + self.intrinsics.f64x2_ty, + "", + ), + "", + ), + "", + ) + .into_vector_value(), + "", + ) + .into_vector_value(), + "", + ); + + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2PMin => { + // Pseudo-min: b < a ? b : a + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _i1) = self.v128_into_f64x2(v1, i1); + let (v2, _i2) = self.v128_into_f64x2(v2, i2); + let cmp = self + .builder + .build_float_compare(FloatPredicate::OLT, v2, v1, ""); + let res = self.builder.build_select(cmp, v2, v1, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Max => { + // This implements the same logic as LLVM's @llvm.maximum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
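+ //
+ // Mirror image of F32Min: when the operands compare equal the
+ // raw bits are AND-ed instead of OR-ed, so max(+0.0, -0.0)
+ // yields +0.0 (0x0000_0000 & 0x8000_0000 == 0x0000_0000). A
+ // rough scalar sketch (illustrative only, not part of the
+ // build):
+ //
+ //   fn wasm_fmax(a: f32, b: f32) -> f32 {
+ //       if a.is_nan() || b.is_nan() {
+ //           f32::NAN // some quiet NaN
+ //       } else if a < b {
+ //           b
+ //       } else if a > b {
+ //           a
+ //       } else {
+ //           f32::from_bits(a.to_bits() & b.to_bits())
+ //       }
+ //   }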
+ let (v1, v2) = self.state.pop2()?; + let v1 = self.canonicalize_nans(v1); + let v2 = self.canonicalize_nans(v2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v1.into(), + self.intrinsics.f32_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v2.into(), + self.intrinsics.f32_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1), + self.builder.build_select( + v2_is_nan, + self.quiet_nan(v2), + self.builder.build_select( + v1_lt_v2, + v2, + self.builder.build_select( + v1_gt_v2, + v1, + self.builder.build_bitcast( + self.builder.build_and( + self.builder + .build_bitcast(v1, self.intrinsics.i32_ty, "") + .into_int_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i32_ty, "") + .into_int_value(), + "", + ), + self.intrinsics.f32_ty, + "", + ), + "", + ), + "", + ), + "", + ), + "", + ); + + self.state.push1(res); + } + Operator::F64Max => { + // This implements the same logic as LLVM's @llvm.maximum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
+ let (v1, v2) = self.state.pop2()?; + let v1 = self.canonicalize_nans(v1); + let v2 = self.canonicalize_nans(v2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v1.into(), + self.intrinsics.f64_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v2.into(), + self.intrinsics.f64_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_int_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1), + self.builder.build_select( + v2_is_nan, + self.quiet_nan(v2), + self.builder.build_select( + v1_lt_v2, + v2, + self.builder.build_select( + v1_gt_v2, + v1, + self.builder.build_bitcast( + self.builder.build_and( + self.builder + .build_bitcast(v1, self.intrinsics.i64_ty, "") + .into_int_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i64_ty, "") + .into_int_value(), + "", + ), + self.intrinsics.f64_ty, + "", + ), + "", + ), + "", + ), + "", + ), + "", + ); + + self.state.push1(res); + } + Operator::F32x4Max => { + // This implements the same logic as LLVM's @llvm.maximum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
+ let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v1.into(), + self.intrinsics.f32x4_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v2.into(), + self.intrinsics.f32x4_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f32x4, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1.into()).into_vector_value(), + self.builder + .build_select( + v2_is_nan, + self.quiet_nan(v2.into()).into_vector_value(), + self.builder + .build_select( + v1_lt_v2, + v2.into(), + self.builder.build_select( + v1_gt_v2, + v1.into(), + self.builder.build_bitcast( + self.builder.build_and( + self.builder + .build_bitcast(v1, self.intrinsics.i32x4_ty, "") + .into_vector_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i32x4_ty, "") + .into_vector_value(), + "", + ), + self.intrinsics.f32x4_ty, + "", + ), + "", + ), + "", + ) + .into_vector_value(), + "", + ) + .into_vector_value(), + "", + ); + + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32x4PMax => { + // Pseudo-max: a < b ? b : a + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _i1) = self.v128_into_f32x4(v1, i1); + let (v2, _i2) = self.v128_into_f32x4(v2, i2); + let cmp = self + .builder + .build_float_compare(FloatPredicate::OLT, v1, v2, ""); + let res = self.builder.build_select(cmp, v2, v1, ""); + + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Max => { + // This implements the same logic as LLVM's @llvm.maximum + // intrinsic would, but x86 lowering of that intrinsic + // encounters a fatal error in LLVM 11. 
+ let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + + let v1_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v1.into(), + self.intrinsics.f64x2_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v2_is_nan = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v2.into(), + self.intrinsics.f64x2_zero.into(), + self.intrinsics.fp_uno_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_lt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_olt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + let v1_gt_v2 = self + .builder + .build_call( + self.intrinsics.cmp_f64x2, + &[ + v1.into(), + v2.into(), + self.intrinsics.fp_ogt_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap() + .into_vector_value(); + + let res = self.builder.build_select( + v1_is_nan, + self.quiet_nan(v1.into()).into_vector_value(), + self.builder + .build_select( + v2_is_nan, + self.quiet_nan(v2.into()).into_vector_value(), + self.builder + .build_select( + v1_lt_v2, + v2.into(), + self.builder.build_select( + v1_gt_v2, + v1.into(), + self.builder.build_bitcast( + self.builder.build_and( + self.builder + .build_bitcast(v1, self.intrinsics.i64x2_ty, "") + .into_vector_value(), + self.builder + .build_bitcast(v2, self.intrinsics.i64x2_ty, "") + .into_vector_value(), + "", + ), + self.intrinsics.f64x2_ty, + "", + ), + "", + ), + "", + ) + .into_vector_value(), + "", + ) + .into_vector_value(), + "", + ); + + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2PMax => { + // Pseudo-max: a < b ? 
b : a + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _i1) = self.v128_into_f64x2(v1, i1); + let (v2, _i2) = self.v128_into_f64x2(v2, i2); + let cmp = self + .builder + .build_float_compare(FloatPredicate::OLT, v1, v2, ""); + let res = self.builder.build_select(cmp, v2, v1, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Ceil => { + let (input, info) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.ceil_f32, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, info | ExtraInfo::pending_f32_nan()); + } + Operator::F32x4Ceil => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f32x4(v, i); + let res = self + .builder + .build_call(self.intrinsics.ceil_f32x4, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f32_nan()); + } + Operator::F64Ceil => { + let (input, info) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.ceil_f64, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, info | ExtraInfo::pending_f64_nan()); + } + Operator::F64x2Ceil => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f64x2(v, i); + let res = self + .builder + .build_call(self.intrinsics.ceil_f64x2, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f64_nan()); + } + Operator::F32Floor => { + let (input, info) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.floor_f32, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, info | ExtraInfo::pending_f32_nan()); + } + Operator::F32x4Floor => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f32x4(v, i); + let res = self + .builder + .build_call(self.intrinsics.floor_f32x4, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f32_nan()); + } + Operator::F64Floor => { + let (input, info) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.floor_f64, &[input.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, info | ExtraInfo::pending_f64_nan()); + } + Operator::F64x2Floor => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f64x2(v, i); + let res = self + .builder + .build_call(self.intrinsics.floor_f64x2, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f64_nan()); + } + Operator::F32Trunc => { + let (v, i) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.trunc_f32, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, i | ExtraInfo::pending_f32_nan()); + } + Operator::F32x4Trunc => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f32x4(v, i); + let res = self + .builder + 
.build_call(self.intrinsics.trunc_f32x4, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f32_nan()); + } + Operator::F64Trunc => { + let (v, i) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.trunc_f64, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, i | ExtraInfo::pending_f64_nan()); + } + Operator::F64x2Trunc => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f64x2(v, i); + let res = self + .builder + .build_call(self.intrinsics.trunc_f64x2, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f64_nan()); + } + Operator::F32Nearest => { + let (v, i) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.nearbyint_f32, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, i | ExtraInfo::pending_f32_nan()); + } + Operator::F32x4Nearest => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f32x4(v, i); + let res = self + .builder + .build_call(self.intrinsics.nearbyint_f32x4, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f32_nan()); + } + Operator::F64Nearest => { + let (v, i) = self.state.pop1_extra()?; + let res = self + .builder + .build_call(self.intrinsics.nearbyint_f64, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + self.state + .push1_extra(res, i | ExtraInfo::pending_f64_nan()); + } + Operator::F64x2Nearest => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f64x2(v, i); + let res = self + .builder + .build_call(self.intrinsics.nearbyint_f64x2, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i | ExtraInfo::pending_f64_nan()); + } + Operator::F32Abs => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let res = self + .builder + .build_call(self.intrinsics.fabs_f32, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + // The exact NaN returned by F32Abs is fully defined. Do not + // adjust. + self.state.push1_extra(res, i.strip_pending()); + } + Operator::F64Abs => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let res = self + .builder + .build_call(self.intrinsics.fabs_f64, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + // The exact NaN returned by F64Abs is fully defined. Do not + // adjust. + self.state.push1_extra(res, i.strip_pending()); + } + Operator::F32x4Abs => { + let (v, i) = self.state.pop1_extra()?; + let v = + self.builder + .build_bitcast(v.into_int_value(), self.intrinsics.f32x4_ty, ""); + let v = self.apply_pending_canonicalization(v, i); + let res = self + .builder + .build_call(self.intrinsics.fabs_f32x4, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // The exact NaN returned by F32x4Abs is fully defined. Do not + // adjust. 
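+ // The input was canonicalized above and fabs cannot introduce a new
+ // NaN, so the pending-canonicalization flag can be dropped from the
+ // result's ExtraInfo.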
+ self.state.push1_extra(res, i.strip_pending()); + } + Operator::F64x2Abs => { + let (v, i) = self.state.pop1_extra()?; + let v = + self.builder + .build_bitcast(v.into_int_value(), self.intrinsics.f64x2_ty, ""); + let v = self.apply_pending_canonicalization(v, i); + let res = self + .builder + .build_call(self.intrinsics.fabs_f64x2, &[v.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // The exact NaN returned by F64x2Abs is fully defined. Do not + // adjust. + self.state.push1_extra(res, i.strip_pending()); + } + Operator::F32x4Neg => { + let (v, i) = self.state.pop1_extra()?; + let v = + self.builder + .build_bitcast(v.into_int_value(), self.intrinsics.f32x4_ty, ""); + let v = self + .apply_pending_canonicalization(v, i) + .into_vector_value(); + let res = self.builder.build_float_neg(v, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // The exact NaN returned by F32x4Neg is fully defined. Do not + // adjust. + self.state.push1_extra(res, i.strip_pending()); + } + Operator::F64x2Neg => { + let (v, i) = self.state.pop1_extra()?; + let v = + self.builder + .build_bitcast(v.into_int_value(), self.intrinsics.f64x2_ty, ""); + let v = self + .apply_pending_canonicalization(v, i) + .into_vector_value(); + let res = self.builder.build_float_neg(v, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // The exact NaN returned by F64x2Neg is fully defined. Do not + // adjust. + self.state.push1_extra(res, i.strip_pending()); + } + Operator::F32Neg | Operator::F64Neg => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i).into_float_value(); + let res = self.builder.build_float_neg(v, ""); + // The exact NaNs returned by F32Neg and F64Neg are fully defined. + // Do not adjust. + self.state.push1_extra(res, i.strip_pending()); + } + Operator::F32Copysign => { + let ((mag, mag_info), (sgn, sgn_info)) = self.state.pop2_extra()?; + let mag = self.apply_pending_canonicalization(mag, mag_info); + let sgn = self.apply_pending_canonicalization(sgn, sgn_info); + let res = self + .builder + .build_call(self.intrinsics.copysign_f32, &[mag.into(), sgn.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + // The exact NaN returned by F32Copysign is fully defined. + // Do not adjust. + self.state.push1_extra(res, mag_info.strip_pending()); + } + Operator::F64Copysign => { + let ((mag, mag_info), (sgn, sgn_info)) = self.state.pop2_extra()?; + let mag = self.apply_pending_canonicalization(mag, mag_info); + let sgn = self.apply_pending_canonicalization(sgn, sgn_info); + let res = self + .builder + .build_call(self.intrinsics.copysign_f64, &[mag.into(), sgn.into()], "") + .try_as_basic_value() + .left() + .unwrap(); + // The exact NaN returned by F64Copysign is fully defined. + // Do not adjust. + self.state.push1_extra(res, mag_info.strip_pending()); + } + + /*************************** + * Integer Comparison instructions.
+ * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#integer-comparison-instructions + ***************************/ + Operator::I32Eq | Operator::I64Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self.builder.build_int_compare(IntPredicate::EQ, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::EQ, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::EQ, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::EQ, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::EQ, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32Ne | Operator::I64Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self.builder.build_int_compare(IntPredicate::NE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::NE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, 
i2); + let res = self.builder.build_int_compare(IntPredicate::NE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::NE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self.builder.build_int_compare(IntPredicate::NE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32LtS | Operator::I64LtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16LtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8LtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4LtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2LtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32LtU | Operator::I64LtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let 
v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I8x16LtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8LtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4LtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::ULT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32LeS | Operator::I64LeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::SLE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16LeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8LeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4LeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + 
self.state.push1(res); + } + Operator::I64x2LeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SLE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32LeU | Operator::I64LeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::ULE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16LeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::ULE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8LeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::ULE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4LeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::ULE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32GtS | Operator::I64GtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16GtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8GtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + 
.build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4GtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2GtS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32GtU | Operator::I64GtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::UGT, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16GtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::UGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8GtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::UGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4GtU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::UGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32GeS | Operator::I64GeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::SGE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I8x16GeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, 
_) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8GeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4GeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2GeS => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i64x2(v1, i1); + let (v2, _) = self.v128_into_i64x2(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::SGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32GeU | Operator::I64GeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let (v1, v2) = (v1.into_int_value(), v2.into_int_value()); + let cond = self + .builder + .build_int_compare(IntPredicate::UGE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16GeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let (v2, _) = self.v128_into_i8x16(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::UGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i8x16_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8GeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::UGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4GeU => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let res = self + .builder + .build_int_compare(IntPredicate::UGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + + 
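+ // In the comparison cases above, scalar results are zero-extended from
+ // i1 to i32 (wasm comparisons produce 0 or 1), while vector results are
+ // sign-extended so each true lane becomes all-ones before the v128 value
+ // is bitcast to i128 for the value stack. A 0-or-1 integer can never be
+ // a NaN bit pattern, which is why most of these results are tagged
+ // arithmetic_f32() | arithmetic_f64().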
/*************************** + * Floating-Point Comparison instructions. + * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#floating-point-comparison-instructions + ***************************/ + Operator::F32Eq | Operator::F64Eq => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let cond = self + .builder + .build_float_compare(FloatPredicate::OEQ, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::F32x4Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OEQ, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Eq => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OEQ, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Ne | Operator::F64Ne => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let cond = self + .builder + .build_float_compare(FloatPredicate::UNE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::F32x4Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::UNE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Ne => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::UNE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Lt | Operator::F64Lt => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let cond = self + .builder + .build_float_compare(FloatPredicate::OLT, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::F32x4Lt => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OLT, v1, v2, ""); + let res = self + .builder + 
.build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Lt => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OLT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Le | Operator::F64Le => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let cond = self + .builder + .build_float_compare(FloatPredicate::OLE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::F32x4Le => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OLE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Le => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OLE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Gt | Operator::F64Gt => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let cond = self + .builder + .build_float_compare(FloatPredicate::OGT, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::F32x4Gt => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Gt => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OGT, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32Ge | Operator::F64Ge => { + let (v1, v2) = self.state.pop2()?; + let (v1, v2) = (v1.into_float_value(), v2.into_float_value()); + let cond = self + .builder + .build_float_compare(FloatPredicate::OGE, v1, v2, ""); + let res = self + .builder + .build_int_z_extend(cond, self.intrinsics.i32_ty, ""); + 
self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::F32x4Ge => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f32x4(v1, i1); + let (v2, _) = self.v128_into_f32x4(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2Ge => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_f64x2(v1, i1); + let (v2, _) = self.v128_into_f64x2(v2, i2); + let res = self + .builder + .build_float_compare(FloatPredicate::OGE, v1, v2, ""); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + + /*************************** + * Conversion instructions. + * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#conversion-instructions + ***************************/ + Operator::I32WrapI64 => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_int_truncate(v, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I64ExtendI32S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_int_s_extend(v, self.intrinsics.i64_ty, ""); + self.state.push1(res); + } + Operator::I64ExtendI32U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_int_z_extend(v, self.intrinsics.i64_ty, ""); + self.state.push1_extra(res, ExtraInfo::arithmetic_f64()); + } + Operator::I16x8ExtendLowI8x16S => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let res = self + .builder + .build_int_s_extend(low, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ExtendHighI8x16S => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[8], + self.intrinsics.i32_consts[9], + self.intrinsics.i32_consts[10], + self.intrinsics.i32_consts[11], + self.intrinsics.i32_consts[12], + self.intrinsics.i32_consts[13], + self.intrinsics.i32_consts[14], + self.intrinsics.i32_consts[15], + ]), + "", + ); + let res = self + .builder + .build_int_s_extend(low, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ExtendLowI8x16U => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, 
i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let res = self + .builder + .build_int_z_extend(low, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ExtendHighI8x16U => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[8], + self.intrinsics.i32_consts[9], + self.intrinsics.i32_consts[10], + self.intrinsics.i32_consts[11], + self.intrinsics.i32_consts[12], + self.intrinsics.i32_consts[13], + self.intrinsics.i32_consts[14], + self.intrinsics.i32_consts[15], + ]), + "", + ); + let res = self + .builder + .build_int_z_extend(low, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ExtendLowI16x8S => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + ]), + "", + ); + let res = self + .builder + .build_int_s_extend(low, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ExtendHighI16x8S => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let res = self + .builder + .build_int_s_extend(low, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ExtendLowI16x8U => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + ]), + "", + ); + let res = self + .builder + .build_int_z_extend(low, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ExtendHighI16x8U => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let res = self + .builder + .build_int_z_extend(low, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + 
self.state.push1(res); + } + Operator::I64x2ExtendLowI32x4U + | Operator::I64x2ExtendLowI32x4S + | Operator::I64x2ExtendHighI32x4U + | Operator::I64x2ExtendHighI32x4S => { + let extend = match op { + Operator::I64x2ExtendLowI32x4U | Operator::I64x2ExtendHighI32x4U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i64x2_ty, "") + } + Operator::I64x2ExtendLowI32x4S | Operator::I64x2ExtendHighI32x4S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i64x2_ty, "") + } + _ => unreachable!("Unhandled inner case"), + }; + let indices = match op { + Operator::I64x2ExtendLowI32x4S | Operator::I64x2ExtendLowI32x4U => { + [self.intrinsics.i32_consts[0], self.intrinsics.i32_consts[1]] + } + Operator::I64x2ExtendHighI32x4S | Operator::I64x2ExtendHighI32x4U => { + [self.intrinsics.i32_consts[2], self.intrinsics.i32_consts[3]] + } + _ => unreachable!("Unhandled inner case"), + }; + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i32x4(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&indices), + "", + ); + let res = extend(&self, low); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16NarrowI16x8S => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); + let min = self.intrinsics.i16_ty.const_int(0xff80, false); + let max = self.intrinsics.i16_ty.const_int(0x007f, false); + let min = VectorType::const_vector(&[min; 8]); + let max = VectorType::const_vector(&[max; 8]); + let apply_min_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SLT, v1, min, ""); + let apply_max_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SGT, v1, max, ""); + let apply_min_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SLT, v2, min, ""); + let apply_max_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SGT, v2, max, ""); + let v1 = self + .builder + .build_select(apply_min_clamp_v1, min, v1, "") + .into_vector_value(); + let v1 = self + .builder + .build_select(apply_max_clamp_v1, max, v1, "") + .into_vector_value(); + let v1 = self + .builder + .build_int_truncate(v1, self.intrinsics.i8_ty.vec_type(8), ""); + let v2 = self + .builder + .build_select(apply_min_clamp_v2, min, v2, "") + .into_vector_value(); + let v2 = self + .builder + .build_select(apply_max_clamp_v2, max, v2, "") + .into_vector_value(); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i8_ty.vec_type(8), ""); + let res = self.builder.build_shuffle_vector( + v1, + v2, + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + self.intrinsics.i32_consts[8], + self.intrinsics.i32_consts[9], + self.intrinsics.i32_consts[10], + self.intrinsics.i32_consts[11], + self.intrinsics.i32_consts[12], + self.intrinsics.i32_consts[13], + self.intrinsics.i32_consts[14], + self.intrinsics.i32_consts[15], + ]), + "", + ); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16NarrowI16x8U => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let (v2, _) = self.v128_into_i16x8(v2, i2); 
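+ // Unsigned narrowing: each i16 lane (interpreted as signed) is clamped
+ // to [0, 0xff] by the compares below before truncation to i8, mirroring
+ // the signed variant above, which clamps to [-128, 127] (0xff80..0x007f).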
+ let min = self.intrinsics.i16x8_ty.const_zero(); + let max = self.intrinsics.i16_ty.const_int(0x00ff, false); + let max = VectorType::const_vector(&[max; 8]); + let apply_min_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SLT, v1, min, ""); + let apply_max_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SGT, v1, max, ""); + let apply_min_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SLT, v2, min, ""); + let apply_max_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SGT, v2, max, ""); + let v1 = self + .builder + .build_select(apply_min_clamp_v1, min, v1, "") + .into_vector_value(); + let v1 = self + .builder + .build_select(apply_max_clamp_v1, max, v1, "") + .into_vector_value(); + let v1 = self + .builder + .build_int_truncate(v1, self.intrinsics.i8_ty.vec_type(8), ""); + let v2 = self + .builder + .build_select(apply_min_clamp_v2, min, v2, "") + .into_vector_value(); + let v2 = self + .builder + .build_select(apply_max_clamp_v2, max, v2, "") + .into_vector_value(); + let v2 = self + .builder + .build_int_truncate(v2, self.intrinsics.i8_ty.vec_type(8), ""); + let res = self.builder.build_shuffle_vector( + v1, + v2, + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + self.intrinsics.i32_consts[8], + self.intrinsics.i32_consts[9], + self.intrinsics.i32_consts[10], + self.intrinsics.i32_consts[11], + self.intrinsics.i32_consts[12], + self.intrinsics.i32_consts[13], + self.intrinsics.i32_consts[14], + self.intrinsics.i32_consts[15], + ]), + "", + ); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8NarrowI32x4S => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let min = self.intrinsics.i32_ty.const_int(0xffff8000, false); + let max = self.intrinsics.i32_ty.const_int(0x00007fff, false); + let min = VectorType::const_vector(&[min; 4]); + let max = VectorType::const_vector(&[max; 4]); + let apply_min_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SLT, v1, min, ""); + let apply_max_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SGT, v1, max, ""); + let apply_min_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SLT, v2, min, ""); + let apply_max_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SGT, v2, max, ""); + let v1 = self + .builder + .build_select(apply_min_clamp_v1, min, v1, "") + .into_vector_value(); + let v1 = self + .builder + .build_select(apply_max_clamp_v1, max, v1, "") + .into_vector_value(); + let v1 = + self.builder + .build_int_truncate(v1, self.intrinsics.i16_ty.vec_type(4), ""); + let v2 = self + .builder + .build_select(apply_min_clamp_v2, min, v2, "") + .into_vector_value(); + let v2 = self + .builder + .build_select(apply_max_clamp_v2, max, v2, "") + .into_vector_value(); + let v2 = + self.builder + .build_int_truncate(v2, self.intrinsics.i16_ty.vec_type(4), ""); + let res = self.builder.build_shuffle_vector( + v1, + v2, + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + 
self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8NarrowI32x4U => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i32x4(v1, i1); + let (v2, _) = self.v128_into_i32x4(v2, i2); + let min = self.intrinsics.i32x4_ty.const_zero(); + let max = self.intrinsics.i32_ty.const_int(0xffff, false); + let max = VectorType::const_vector(&[max; 4]); + let apply_min_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SLT, v1, min, ""); + let apply_max_clamp_v1 = + self.builder + .build_int_compare(IntPredicate::SGT, v1, max, ""); + let apply_min_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SLT, v2, min, ""); + let apply_max_clamp_v2 = + self.builder + .build_int_compare(IntPredicate::SGT, v2, max, ""); + let v1 = self + .builder + .build_select(apply_min_clamp_v1, min, v1, "") + .into_vector_value(); + let v1 = self + .builder + .build_select(apply_max_clamp_v1, max, v1, "") + .into_vector_value(); + let v1 = + self.builder + .build_int_truncate(v1, self.intrinsics.i16_ty.vec_type(4), ""); + let v2 = self + .builder + .build_select(apply_min_clamp_v2, min, v2, "") + .into_vector_value(); + let v2 = self + .builder + .build_select(apply_max_clamp_v2, max, v2, "") + .into_vector_value(); + let v2 = + self.builder + .build_int_truncate(v2, self.intrinsics.i16_ty.vec_type(4), ""); + let res = self.builder.build_shuffle_vector( + v1, + v2, + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + self.intrinsics.i32_consts[4], + self.intrinsics.i32_consts[5], + self.intrinsics.i32_consts[6], + self.intrinsics.i32_consts[7], + ]), + "", + ); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4TruncSatF32x4S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self.trunc_sat_into_int( + self.intrinsics.f32x4_ty, + self.intrinsics.i32x4_ty, + LEF32_GEQ_I32_MIN, + GEF32_LEQ_I32_MAX, + std::i32::MIN as u64, + std::i32::MAX as u64, + v, + ); + self.state.push1(res); + } + Operator::I32x4TruncSatF32x4U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self.trunc_sat_into_int( + self.intrinsics.f32x4_ty, + self.intrinsics.i32x4_ty, + LEF32_GEQ_U32_MIN, + GEF32_LEQ_U32_MAX, + std::u32::MIN as u64, + std::u32::MAX as u64, + v, + ); + self.state.push1(res); + } + Operator::I32x4TruncSatF64x2SZero | Operator::I32x4TruncSatF64x2UZero => { + let ((min, max), (cmp_min, cmp_max)) = match op { + Operator::I32x4TruncSatF64x2SZero => ( + (std::i32::MIN as u64, std::i32::MAX as u64), + (LEF64_GEQ_I32_MIN, GEF64_LEQ_I32_MAX), + ), + Operator::I32x4TruncSatF64x2UZero => ( + (std::u32::MIN as u64, std::u32::MAX as u64), + (LEF64_GEQ_U32_MIN, GEF64_LEQ_U32_MAX), + ), + _ => unreachable!("Unhandled internal variant"), + }; + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self.trunc_sat( + self.intrinsics.f64x2_ty, + self.intrinsics.i32_ty.vec_type(2), + cmp_min, + cmp_max, + min, + max, + v, + ); + + let zero = self.intrinsics.i32_consts[0]; + let zeros = VectorType::const_vector(&[zero; 2]); 
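+ // The saturating truncation above produced only two i32 lanes; the
+ // shuffle below concatenates them with two zero lanes to form the full
+ // i32x4 result required by the *Zero variants.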
+ let res = self.builder.build_shuffle_vector( + res, + zeros, + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + ]), + "", + ); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + // Operator::I64x2TruncSatF64x2S => { + // let (v, i) = self.state.pop1_extra()?; + // let v = self.apply_pending_canonicalization(v, i); + // let v = v.into_int_value(); + // let res = self.trunc_sat_into_int( + // self.intrinsics.f64x2_ty, + // self.intrinsics.i64x2_ty, + // std::i64::MIN as u64, + // std::i64::MAX as u64, + // std::i64::MIN as u64, + // std::i64::MAX as u64, + // v, + // ); + // self.state.push1(res); + // } + // Operator::I64x2TruncSatF64x2U => { + // let (v, i) = self.state.pop1_extra()?; + // let v = self.apply_pending_canonicalization(v, i); + // let v = v.into_int_value(); + // let res = self.trunc_sat_into_int( + // self.intrinsics.f64x2_ty, + // self.intrinsics.i64x2_ty, + // std::u64::MIN, + // std::u64::MAX, + // std::u64::MIN, + // std::u64::MAX, + // v, + // ); + // self.state.push1(res); + // } + Operator::I32TruncF32S => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xcf000000, // -2147483600.0 + 0x4effffff, // 2147483500.0 + v1, + ); + let res = self + .builder + .build_float_to_signed_int(v1, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I32TruncF64S => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xc1e00000001fffff, // -2147483648.9999995 + 0x41dfffffffffffff, // 2147483647.9999998 + v1, + ); + let res = self + .builder + .build_float_to_signed_int(v1, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I32TruncSatF32S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i32_ty, + LEF32_GEQ_I32_MIN, + GEF32_LEQ_I32_MAX, + std::i32::MIN as u32 as u64, + std::i32::MAX as u32 as u64, + v, + ); + self.state.push1(res); + } + Operator::I32TruncSatF64S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i32_ty, + LEF64_GEQ_I32_MIN, + GEF64_LEQ_I32_MAX, + std::i32::MIN as u64, + std::i32::MAX as u64, + v, + ); + self.state.push1(res); + } + Operator::I64TruncF32S => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xdf000000, // -9223372000000000000.0 + 0x5effffff, // 9223371500000000000.0 + v1, + ); + let res = self + .builder + .build_float_to_signed_int(v1, self.intrinsics.i64_ty, ""); + self.state.push1(res); + } + Operator::I64TruncF64S => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xc3e0000000000000, // -9223372036854776000.0 + 0x43dfffffffffffff, // 9223372036854775000.0 + v1, + ); + let res = self + .builder + .build_float_to_signed_int(v1, self.intrinsics.i64_ty, ""); + self.state.push1(res); + } + Operator::I64TruncSatF32S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i64_ty, + LEF32_GEQ_I64_MIN, + GEF32_LEQ_I64_MAX, + std::i64::MIN as u64, + std::i64::MAX as u64, + v, + ); + 
self.state.push1(res); + } + Operator::I64TruncSatF64S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i64_ty, + LEF64_GEQ_I64_MIN, + GEF64_LEQ_I64_MAX, + std::i64::MIN as u64, + std::i64::MAX as u64, + v, + ); + self.state.push1(res); + } + Operator::I32TruncF32U => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xbf7fffff, // -0.99999994 + 0x4f7fffff, // 4294967000.0 + v1, + ); + let res = self + .builder + .build_float_to_unsigned_int(v1, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I32TruncF64U => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xbfefffffffffffff, // -0.9999999999999999 + 0x41efffffffffffff, // 4294967295.9999995 + v1, + ); + let res = self + .builder + .build_float_to_unsigned_int(v1, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I32TruncSatF32U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i32_ty, + LEF32_GEQ_U32_MIN, + GEF32_LEQ_U32_MAX, + std::u32::MIN as u64, + std::u32::MAX as u64, + v, + ); + self.state.push1(res); + } + Operator::I32TruncSatF64U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i32_ty, + LEF64_GEQ_U32_MIN, + GEF64_LEQ_U32_MAX, + std::u32::MIN as u64, + std::u32::MAX as u64, + v, + ); + self.state.push1(res); + } + Operator::I64TruncF32U => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xbf7fffff, // -0.99999994 + 0x5f7fffff, // 18446743000000000000.0 + v1, + ); + let res = self + .builder + .build_float_to_unsigned_int(v1, self.intrinsics.i64_ty, ""); + self.state.push1(res); + } + Operator::I64TruncF64U => { + let v1 = self.state.pop1()?.into_float_value(); + self.trap_if_not_representable_as_int( + 0xbfefffffffffffff, // -0.9999999999999999 + 0x43efffffffffffff, // 18446744073709550000.0 + v1, + ); + let res = self + .builder + .build_float_to_unsigned_int(v1, self.intrinsics.i64_ty, ""); + self.state.push1(res); + } + Operator::I64TruncSatF32U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i64_ty, + LEF32_GEQ_U64_MIN, + GEF32_LEQ_U64_MAX, + std::u64::MIN, + std::u64::MAX, + v, + ); + self.state.push1(res); + } + Operator::I64TruncSatF64U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_float_value(); + let res = self.trunc_sat_scalar( + self.intrinsics.i64_ty, + LEF64_GEQ_U64_MIN, + GEF64_LEQ_U64_MAX, + std::u64::MIN, + std::u64::MAX, + v, + ); + self.state.push1(res); + } + Operator::F32DemoteF64 => { + let v = self.state.pop1()?; + let v = v.into_float_value(); + let res = self + .builder + .build_call( + self.intrinsics.fptrunc_f64, + &[ + v.into(), + self.intrinsics.fp_rounding_md, + self.intrinsics.fp_exception_md, + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::pending_f32_nan()); + } + Operator::F64PromoteF32 => { + let v = self.state.pop1()?; + let v = v.into_float_value(); + let res = self 
+ .builder + .build_call( + self.intrinsics.fpext_f32, + &[v.into(), self.intrinsics.fp_exception_md], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1_extra(res, ExtraInfo::pending_f64_nan()); + } + Operator::F32ConvertI32S | Operator::F32ConvertI64S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_signed_int_to_float(v, self.intrinsics.f32_ty, ""); + self.state.push1(res); + } + Operator::F64ConvertI32S | Operator::F64ConvertI64S => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_signed_int_to_float(v, self.intrinsics.f64_ty, ""); + self.state.push1(res); + } + Operator::F32ConvertI32U | Operator::F32ConvertI64U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_unsigned_int_to_float(v, self.intrinsics.f32_ty, ""); + self.state.push1(res); + } + Operator::F64ConvertI32U | Operator::F64ConvertI64U => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let v = v.into_int_value(); + let res = self + .builder + .build_unsigned_int_to_float(v, self.intrinsics.f64_ty, ""); + self.state.push1(res); + } + Operator::F32x4ConvertI32x4S => { + let v = self.state.pop1()?; + let v = self + .builder + .build_bitcast(v, self.intrinsics.i32x4_ty, "") + .into_vector_value(); + let res = self + .builder + .build_signed_int_to_float(v, self.intrinsics.f32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F32x4ConvertI32x4U => { + let v = self.state.pop1()?; + let v = self + .builder + .build_bitcast(v, self.intrinsics.i32x4_ty, "") + .into_vector_value(); + let res = self + .builder + .build_unsigned_int_to_float(v, self.intrinsics.f32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2ConvertLowI32x4S | Operator::F64x2ConvertLowI32x4U => { + let extend = match op { + Operator::F64x2ConvertLowI32x4U => { + |s: &Self, v| s.builder.build_int_z_extend(v, s.intrinsics.i64x2_ty, "") + } + Operator::F64x2ConvertLowI32x4S => { + |s: &Self, v| s.builder.build_int_s_extend(v, s.intrinsics.i64x2_ty, "") + } + _ => unreachable!("Unhandled inner case"), + }; + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i32x4(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + ]), + "", + ); + let res = extend(&self, low); + let res = self + .builder + .build_signed_int_to_float(res, self.intrinsics.f64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::F64x2PromoteLowF32x4 => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f32x4(v, i); + let low = self.builder.build_shuffle_vector( + v, + v.get_type().get_undef(), + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + ]), + "", + ); + let res = self + .builder + .build_float_ext(low, self.intrinsics.f64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, 
ExtraInfo::pending_f64_nan()); + } + Operator::F32x4DemoteF64x2Zero => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_f64x2(v, i); + let f32x2_ty = self.intrinsics.f32_ty.vec_type(2); + let res = self.builder.build_float_trunc(v, f32x2_ty, ""); + let zeros = f32x2_ty.const_zero(); + let res = self.builder.build_shuffle_vector( + res, + zeros, + VectorType::const_vector(&[ + self.intrinsics.i32_consts[0], + self.intrinsics.i32_consts[1], + self.intrinsics.i32_consts[2], + self.intrinsics.i32_consts[3], + ]), + "", + ); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, ExtraInfo::pending_f32_nan()); + } + // Operator::F64x2ConvertI64x2S => { + // let v = self.state.pop1()?; + // let v = self + // .builder + // .build_bitcast(v, self.intrinsics.i64x2_ty, "") + // .into_vector_value(); + // let res = self + // .builder + // .build_signed_int_to_float(v, self.intrinsics.f64x2_ty, ""); + // let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // self.state.push1(res); + // } + // Operator::F64x2ConvertI64x2U => { + // let v = self.state.pop1()?; + // let v = self + // .builder + // .build_bitcast(v, self.intrinsics.i64x2_ty, "") + // .into_vector_value(); + // let res = self + // .builder + // .build_unsigned_int_to_float(v, self.intrinsics.f64x2_ty, ""); + // let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + // self.state.push1(res); + // } + Operator::I32ReinterpretF32 => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let ret = self.builder.build_bitcast(v, self.intrinsics.i32_ty, ""); + self.state.push1_extra(ret, ExtraInfo::arithmetic_f32()); + } + Operator::I64ReinterpretF64 => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let ret = self.builder.build_bitcast(v, self.intrinsics.i64_ty, ""); + self.state.push1_extra(ret, ExtraInfo::arithmetic_f64()); + } + Operator::F32ReinterpretI32 => { + let (v, i) = self.state.pop1_extra()?; + let ret = self.builder.build_bitcast(v, self.intrinsics.f32_ty, ""); + self.state.push1_extra(ret, i); + } + Operator::F64ReinterpretI64 => { + let (v, i) = self.state.pop1_extra()?; + let ret = self.builder.build_bitcast(v, self.intrinsics.f64_ty, ""); + self.state.push1_extra(ret, i); + } + + /*************************** + * Sign-extension operators. 
+ * https://github.com/WebAssembly/sign-extension-ops/blob/master/proposals/sign-extension-ops/Overview.md + ***************************/ + Operator::I32Extend8S => { + let value = self.state.pop1()?.into_int_value(); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let extended_value = + self.builder + .build_int_s_extend(narrow_value, self.intrinsics.i32_ty, ""); + self.state.push1(extended_value); + } + Operator::I32Extend16S => { + let value = self.state.pop1()?.into_int_value(); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let extended_value = + self.builder + .build_int_s_extend(narrow_value, self.intrinsics.i32_ty, ""); + self.state.push1(extended_value); + } + Operator::I64Extend8S => { + let value = self.state.pop1()?.into_int_value(); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let extended_value = + self.builder + .build_int_s_extend(narrow_value, self.intrinsics.i64_ty, ""); + self.state.push1(extended_value); + } + Operator::I64Extend16S => { + let value = self.state.pop1()?.into_int_value(); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let extended_value = + self.builder + .build_int_s_extend(narrow_value, self.intrinsics.i64_ty, ""); + self.state.push1(extended_value); + } + Operator::I64Extend32S => { + let value = self.state.pop1()?.into_int_value(); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let extended_value = + self.builder + .build_int_s_extend(narrow_value, self.intrinsics.i64_ty, ""); + self.state.push1(extended_value); + } + + /*************************** + * Load and Store instructions. 
+ * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#load-and-store-instructions + ***************************/ + Operator::I32Load { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + result.as_instruction_value().unwrap(), + )?; + self.state.push1(result); + } + Operator::I64Load { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + result.as_instruction_value().unwrap(), + )?; + self.state.push1(result); + } + Operator::F32Load { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.f32_ptr_ty, + offset, + 4, + )?; + let result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + result.as_instruction_value().unwrap(), + )?; + self.state.push1(result); + } + Operator::F64Load { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.f64_ptr_ty, + offset, + 8, + )?; + let result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + result.as_instruction_value().unwrap(), + )?; + self.state.push1(result); + } + Operator::V128Load { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i128_ptr_ty, + offset, + 16, + )?; + let result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + result.as_instruction_value().unwrap(), + )?; + self.state.push1(result); + } + Operator::V128Load8Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _i) = self.v128_into_i8x16(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let element = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + element.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v, element, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load16Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_i16x8(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + let 
effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let element = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + element.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v, element, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + Operator::V128Load32Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_i32x4(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let element = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + element.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v, element, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + Operator::V128Load64Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_i64x2(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let element = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + element.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v, element, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1_extra(res, i); + } + + Operator::I32Store { ref memarg } => { + let value = self.state.pop1()?; + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let store = self.builder.build_store(effective_address, value); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::I64Store { ref memarg } => { + let value = self.state.pop1()?; + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let store = self.builder.build_store(effective_address, value); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::F32Store { ref memarg } => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let offset = 
self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.f32_ptr_ty, + offset, + 4, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let store = self.builder.build_store(effective_address, v); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::F64Store { ref memarg } => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.f64_ptr_ty, + offset, + 8, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let store = self.builder.build_store(effective_address, v); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::V128Store { ref memarg } => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i128_ptr_ty, + offset, + 16, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let store = self.builder.build_store(effective_address, v); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::V128Store8Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _i) = self.v128_into_i8x16(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let val = self.builder.build_extract_element(v, idx, ""); + let store = self.builder.build_store(effective_address, val); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::V128Store16Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _i) = self.v128_into_i16x8(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let val = self.builder.build_extract_element(v, idx, ""); + let store = self.builder.build_store(effective_address, val); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::V128Store32Lane { ref memarg, lane 
} => { + let (v, i) = self.state.pop1_extra()?; + let (v, _i) = self.v128_into_i32x4(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let val = self.builder.build_extract_element(v, idx, ""); + let store = self.builder.build_store(effective_address, val); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::V128Store64Lane { ref memarg, lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _i) = self.v128_into_i64x2(v, i); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(memarg.memory); + + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let val = self.builder.build_extract_element(v, idx, ""); + let store = self.builder.build_store(effective_address, val); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::I32Load8S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_s_extend( + narrow_result.into_int_value(), + self.intrinsics.i32_ty, + "", + ); + self.state.push1(result); + } + Operator::I32Load16S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_s_extend( + narrow_result.into_int_value(), + self.intrinsics.i32_ty, + "", + ); + self.state.push1(result); + } + Operator::I64Load8S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = + self.builder + .build_int_s_extend(narrow_result, self.intrinsics.i64_ty, ""); + self.state.push1(result); + } + Operator::I64Load16S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = 
MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = + self.builder + .build_int_s_extend(narrow_result, self.intrinsics.i64_ty, ""); + self.state.push1(result); + } + Operator::I64Load32S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_s_extend( + narrow_result.into_int_value(), + self.intrinsics.i64_ty, + "", + ); + self.state.push1(result); + } + + Operator::I32Load8U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_z_extend( + narrow_result.into_int_value(), + self.intrinsics.i32_ty, + "", + ); + self.state.push1_extra(result, ExtraInfo::arithmetic_f32()); + } + Operator::I32Load16U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_z_extend( + narrow_result.into_int_value(), + self.intrinsics.i32_ty, + "", + ); + self.state.push1_extra(result, ExtraInfo::arithmetic_f32()); + } + Operator::I64Load8U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_z_extend( + narrow_result.into_int_value(), + self.intrinsics.i64_ty, + "", + ); + self.state.push1_extra(result, ExtraInfo::arithmetic_f64()); + } + Operator::I64Load16U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_z_extend( + 
narrow_result.into_int_value(), + self.intrinsics.i64_ty, + "", + ); + self.state.push1_extra(result, ExtraInfo::arithmetic_f64()); + } + Operator::I64Load32U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let narrow_result = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + narrow_result.as_instruction_value().unwrap(), + )?; + let result = self.builder.build_int_z_extend( + narrow_result.into_int_value(), + self.intrinsics.i64_ty, + "", + ); + self.state.push1_extra(result, ExtraInfo::arithmetic_f64()); + } + + Operator::I32Store8 { ref memarg } | Operator::I64Store8 { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let store = self.builder.build_store(effective_address, narrow_value); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::I32Store16 { ref memarg } | Operator::I64Store16 { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let store = self.builder.build_store(effective_address, narrow_value); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::I64Store32 { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let dead_load = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + dead_load.as_instruction_value().unwrap(), + )?; + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let store = self.builder.build_store(effective_address, narrow_value); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + } + Operator::I8x16Neg => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let res = self.builder.build_int_sub(v.get_type().const_zero(), v, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8Neg => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let res = self.builder.build_int_sub(v.get_type().const_zero(), v, ""); + let res = 
self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4Neg => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i32x4(v, i); + let res = self.builder.build_int_sub(v.get_type().const_zero(), v, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I64x2Neg => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i64x2(v, i); + let res = self.builder.build_int_sub(v.get_type().const_zero(), v, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Not => { + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i).into_int_value(); + let res = self.builder.build_not(v, ""); + self.state.push1(res); + } + Operator::V128AnyTrue => { + // | Operator::I64x2AnyTrue + // Skip canonicalization, it never changes non-zero values to zero or vice versa. + let v = self.state.pop1()?.into_int_value(); + let res = self.builder.build_int_compare( + IntPredicate::NE, + v, + v.get_type().const_zero(), + "", + ); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16AllTrue + | Operator::I16x8AllTrue + | Operator::I32x4AllTrue + | Operator::I64x2AllTrue => { + let vec_ty = match op { + Operator::I8x16AllTrue => self.intrinsics.i8x16_ty, + Operator::I16x8AllTrue => self.intrinsics.i16x8_ty, + Operator::I32x4AllTrue => self.intrinsics.i32x4_ty, + Operator::I64x2AllTrue => self.intrinsics.i64x2_ty, + _ => unreachable!(), + }; + let (v, i) = self.state.pop1_extra()?; + let v = self.apply_pending_canonicalization(v, i).into_int_value(); + let lane_int_ty = self.context.custom_width_int_type(vec_ty.get_size()); + let vec = self + .builder + .build_bitcast(v, vec_ty, "vec") + .into_vector_value(); + let mask = self.builder.build_int_compare( + IntPredicate::NE, + vec, + vec_ty.const_zero(), + "mask", + ); + let cmask = self + .builder + .build_bitcast(mask, lane_int_ty, "cmask") + .into_int_value(); + let res = self.builder.build_int_compare( + IntPredicate::EQ, + cmask, + lane_int_ty.const_int(std::u64::MAX, true), + "", + ); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1_extra( + res, + ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + ); + } + Operator::I8x16ExtractLaneS { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self + .builder + .build_extract_element(v, idx, "") + .into_int_value(); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I8x16ExtractLaneU { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i8x16(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self + .builder + .build_extract_element(v, idx, "") + .into_int_value(); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1_extra(res, ExtraInfo::arithmetic_f32()); + } + Operator::I16x8ExtractLaneS { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + 
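// extract_lane_s sign-extends the i16 lane to i32; the _u variant below zero-extends instead. +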
let res = self + .builder + .build_extract_element(v, idx, "") + .into_int_value(); + let res = self + .builder + .build_int_s_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1(res); + } + Operator::I16x8ExtractLaneU { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, _) = self.v128_into_i16x8(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self + .builder + .build_extract_element(v, idx, "") + .into_int_value(); + let res = self + .builder + .build_int_z_extend(res, self.intrinsics.i32_ty, ""); + self.state.push1_extra(res, ExtraInfo::arithmetic_f32()); + } + Operator::I32x4ExtractLane { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_i32x4(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_extract_element(v, idx, ""); + self.state.push1_extra(res, i); + } + Operator::I64x2ExtractLane { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_i64x2(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_extract_element(v, idx, ""); + self.state.push1_extra(res, i); + } + Operator::F32x4ExtractLane { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_f32x4(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_extract_element(v, idx, ""); + self.state.push1_extra(res, i); + } + Operator::F64x2ExtractLane { lane } => { + let (v, i) = self.state.pop1_extra()?; + let (v, i) = self.v128_into_f64x2(v, i); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_extract_element(v, idx, ""); + self.state.push1_extra(res, i); + } + Operator::I8x16ReplaceLane { lane } => { + let ((v1, i1), (v2, _)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i8x16(v1, i1); + let v2 = v2.into_int_value(); + let v2 = self.builder.build_int_cast(v2, self.intrinsics.i8_ty, ""); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v1, v2, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I16x8ReplaceLane { lane } => { + let ((v1, i1), (v2, _)) = self.state.pop2_extra()?; + let (v1, _) = self.v128_into_i16x8(v1, i1); + let v2 = v2.into_int_value(); + let v2 = self.builder.build_int_cast(v2, self.intrinsics.i16_ty, ""); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v1, v2, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I32x4ReplaceLane { lane } => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_i32x4(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let i2 = i2.strip_pending(); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v1, v2, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i1 & i2 & ExtraInfo::arithmetic_f32()); + } + Operator::I64x2ReplaceLane { lane } => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_i64x2(v1, i1); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = v2.into_int_value(); + let i2 = 
i2.strip_pending(); + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v1, v2, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state + .push1_extra(res, i1 & i2 & ExtraInfo::arithmetic_f64()); + } + Operator::F32x4ReplaceLane { lane } => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f32x4(v1, i1); + let push_pending_f32_nan_to_result = + i1.has_pending_f32_nan() && i2.has_pending_f32_nan(); + let (v1, v2) = if !push_pending_f32_nan_to_result { + ( + self.apply_pending_canonicalization(v1.as_basic_value_enum(), i1) + .into_vector_value(), + self.apply_pending_canonicalization(v2.as_basic_value_enum(), i2) + .into_float_value(), + ) + } else { + (v1, v2.into_float_value()) + }; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v1, v2, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + let info = if push_pending_f32_nan_to_result { + ExtraInfo::pending_f32_nan() + } else { + i1.strip_pending() & i2.strip_pending() + }; + self.state.push1_extra(res, info); + } + Operator::F64x2ReplaceLane { lane } => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let (v1, i1) = self.v128_into_f64x2(v1, i1); + let push_pending_f64_nan_to_result = + i1.has_pending_f64_nan() && i2.has_pending_f64_nan(); + let (v1, v2) = if !push_pending_f64_nan_to_result { + ( + self.apply_pending_canonicalization(v1.as_basic_value_enum(), i1) + .into_vector_value(), + self.apply_pending_canonicalization(v2.as_basic_value_enum(), i2) + .into_float_value(), + ) + } else { + (v1, v2.into_float_value()) + }; + let idx = self.intrinsics.i32_ty.const_int(lane.into(), false); + let res = self.builder.build_insert_element(v1, v2, idx, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + let info = if push_pending_f64_nan_to_result { + ExtraInfo::pending_f64_nan() + } else { + i1.strip_pending() & i2.strip_pending() + }; + self.state.push1_extra(res, info); + } + Operator::I8x16Swizzle => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v1 = self + .builder + .build_bitcast(v1, self.intrinsics.i8x16_ty, "") + .into_vector_value(); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = self + .builder + .build_bitcast(v2, self.intrinsics.i8x16_ty, "") + .into_vector_value(); + let lanes = self.intrinsics.i8_ty.const_int(16, false); + let lanes = + self.splat_vector(lanes.as_basic_value_enum(), self.intrinsics.i8x16_ty); + let mut res = self.intrinsics.i8x16_ty.get_undef(); + let idx_out_of_range = self.builder.build_int_compare( + IntPredicate::UGE, + v2, + lanes, + "idx_out_of_range", + ); + let idx_clamped = self + .builder + .build_select( + idx_out_of_range, + self.intrinsics.i8x16_ty.const_zero(), + v2, + "idx_clamped", + ) + .into_vector_value(); + for i in 0..16 { + let idx = self + .builder + .build_extract_element( + idx_clamped, + self.intrinsics.i32_ty.const_int(i, false), + "idx", + ) + .into_int_value(); + let replace_with_zero = self + .builder + .build_extract_element( + idx_out_of_range, + self.intrinsics.i32_ty.const_int(i, false), + "replace_with_zero", + ) + .into_int_value(); + let elem = self + .builder + .build_extract_element(v1, idx, "elem") + .into_int_value(); + let elem_or_zero = self.builder.build_select( + replace_with_zero, + self.intrinsics.i8_zero, 
+ elem, + "elem_or_zero", + ); + res = self.builder.build_insert_element( + res, + elem_or_zero, + self.intrinsics.i32_ty.const_int(i, false), + "", + ); + } + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::I8x16Shuffle { lanes } => { + let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?; + let v1 = self.apply_pending_canonicalization(v1, i1); + let v1 = self + .builder + .build_bitcast(v1, self.intrinsics.i8x16_ty, "") + .into_vector_value(); + let v2 = self.apply_pending_canonicalization(v2, i2); + let v2 = self + .builder + .build_bitcast(v2, self.intrinsics.i8x16_ty, "") + .into_vector_value(); + let mask = VectorType::const_vector( + lanes + .iter() + .map(|l| self.intrinsics.i32_ty.const_int((*l).into(), false)) + .collect::<Vec<_>>() + .as_slice(), + ); + let res = self.builder.build_shuffle_vector(v1, v2, mask, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load8x8S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let v = self.builder.build_load(effective_address, ""); + let v = self + .builder + .build_bitcast(v, self.intrinsics.i8_ty.vec_type(8), "") + .into_vector_value(); + let res = self + .builder + .build_int_s_extend(v, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load8x8U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let v = self.builder.build_load(effective_address, ""); + let v = self + .builder + .build_bitcast(v, self.intrinsics.i8_ty.vec_type(8), "") + .into_vector_value(); + let res = self + .builder + .build_int_z_extend(v, self.intrinsics.i16x8_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load16x4S { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let v = self.builder.build_load(effective_address, ""); + let v = self + .builder + .build_bitcast(v, self.intrinsics.i16_ty.vec_type(4), "") + .into_vector_value(); + let res = self + .builder + .build_int_s_extend(v, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load16x4U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let v = self.builder.build_load(effective_address, ""); + let v = self + .builder + .build_bitcast(v, self.intrinsics.i16_ty.vec_type(4), "") + .into_vector_value(); + let res = self + .builder + .build_int_z_extend(v, self.intrinsics.i32x4_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load32x2S { ref memarg } => { + 
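// v128.load32x2_s: load 64 bits from memory and sign-extend each of the two i32 lanes to i64. +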
let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let v = self.builder.build_load(effective_address, ""); + let v = self + .builder + .build_bitcast(v, self.intrinsics.i32_ty.vec_type(2), "") + .into_vector_value(); + let res = self + .builder + .build_int_s_extend(v, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load32x2U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let v = self.builder.build_load(effective_address, ""); + let v = self + .builder + .build_bitcast(v, self.intrinsics.i32_ty.vec_type(2), "") + .into_vector_value(); + let res = self + .builder + .build_int_z_extend(v, self.intrinsics.i64x2_ty, ""); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load32Zero { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let elem = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + elem.as_instruction_value().unwrap(), + )?; + let res = self.builder.build_int_z_extend( + elem.into_int_value(), + self.intrinsics.i128_ty, + "", + ); + self.state.push1(res); + } + Operator::V128Load64Zero { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let elem = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + elem.as_instruction_value().unwrap(), + )?; + let res = self.builder.build_int_z_extend( + elem.into_int_value(), + self.intrinsics.i128_ty, + "", + ); + self.state.push1(res); + } + Operator::V128Load8Splat { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + let elem = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + elem.as_instruction_value().unwrap(), + )?; + let res = self.splat_vector(elem, self.intrinsics.i8x16_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load16Splat { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + let elem = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + elem.as_instruction_value().unwrap(), + )?; + let res = self.splat_vector(elem, self.intrinsics.i16x8_ty); + let res = self.builder.build_bitcast(res, 
self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load32Splat { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + let elem = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + elem.as_instruction_value().unwrap(), + )?; + let res = self.splat_vector(elem, self.intrinsics.i32x4_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::V128Load64Splat { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + let elem = self.builder.build_load(effective_address, ""); + self.annotate_user_memaccess( + memory_index, + memarg, + 1, + elem.as_instruction_value().unwrap(), + )?; + let res = self.splat_vector(elem, self.intrinsics.i64x2_ty); + let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, ""); + self.state.push1(res); + } + Operator::AtomicFence { flags: _ } => { + // Fence is a nop. + // + // Fence was added to preserve information about fences from + // source languages. If in the future Wasm extends the memory + // model, and if we hadn't recorded what fences used to be there, + // it would lead to data races that weren't present in the + // original source language. + } + Operator::I32AtomicLoad { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let result = self.builder.build_load(effective_address, ""); + let load = result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 4, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + self.state.push1(result); + } + Operator::I64AtomicLoad { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let result = self.builder.build_load(effective_address, ""); + let load = result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 8, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + self.state.push1(result); + } + Operator::I32AtomicLoad8U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + let load = narrow_result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 1, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + let result = + self.builder + 
.build_int_z_extend(narrow_result, self.intrinsics.i32_ty, ""); + self.state.push1_extra(result, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicLoad16U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + let load = narrow_result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 2, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + let result = + self.builder + .build_int_z_extend(narrow_result, self.intrinsics.i32_ty, ""); + self.state.push1_extra(result, ExtraInfo::arithmetic_f32()); + } + Operator::I64AtomicLoad8U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + let load = narrow_result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 1, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + let result = + self.builder + .build_int_z_extend(narrow_result, self.intrinsics.i64_ty, ""); + self.state.push1_extra(result, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicLoad16U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + let load = narrow_result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 2, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + let result = + self.builder + .build_int_z_extend(narrow_result, self.intrinsics.i64_ty, ""); + self.state.push1_extra(result, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicLoad32U { ref memarg } => { + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_result = self + .builder + .build_load(effective_address, "") + .into_int_value(); + let load = narrow_result.as_instruction_value().unwrap(); + self.annotate_user_memaccess(memory_index, memarg, 4, load)?; + load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + let result = + self.builder + .build_int_z_extend(narrow_result, self.intrinsics.i64_ty, ""); + self.state.push1_extra(result, ExtraInfo::arithmetic_f64()); + } + Operator::I32AtomicStore { ref memarg } => { + let value = self.state.pop1()?; + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + 
memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let store = self.builder.build_store(effective_address, value); + self.annotate_user_memaccess(memory_index, memarg, 4, store)?; + store + .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + } + Operator::I64AtomicStore { ref memarg } => { + let value = self.state.pop1()?; + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let store = self.builder.build_store(effective_address, value); + self.annotate_user_memaccess(memory_index, memarg, 8, store)?; + store + .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + } + Operator::I32AtomicStore8 { ref memarg } | Operator::I64AtomicStore8 { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let store = self.builder.build_store(effective_address, narrow_value); + self.annotate_user_memaccess(memory_index, memarg, 1, store)?; + store + .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + } + Operator::I32AtomicStore16 { ref memarg } + | Operator::I64AtomicStore16 { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let store = self.builder.build_store(effective_address, narrow_value); + self.annotate_user_memaccess(memory_index, memarg, 2, store)?; + store + .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + } + Operator::I64AtomicStore32 { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let store = self.builder.build_store(effective_address, narrow_value); + self.annotate_user_memaccess(memory_index, memarg, 4, store)?; + store + .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) + .unwrap(); + } + Operator::I32AtomicRmw8AddU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, 
self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Add, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + tbaa_label( + &self.module, + self.intrinsics, + format!("memory {}", memory_index.as_u32()), + old.as_instruction_value().unwrap(), + ); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16AddU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Add, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + tbaa_label( + &self.module, + self.intrinsics, + format!("memory {}", memory_index.as_u32()), + old.as_instruction_value().unwrap(), + ); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwAdd { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Add, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + tbaa_label( + &self.module, + self.intrinsics, + format!("memory {}", memory_index.as_u32()), + old.as_instruction_value().unwrap(), + ); + self.state.push1(old); + } + Operator::I64AtomicRmw8AddU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Add, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16AddU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + 
AtomicRMWBinOp::Add, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32AddU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Add, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwAdd { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Add, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I32AtomicRmw8SubU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16SubU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, 
+ old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwSub { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I64AtomicRmw8SubU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16SubU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32SubU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, 
ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwSub { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Sub, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I32AtomicRmw8AndU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16AndU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwAnd { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I64AtomicRmw8AndU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, 
effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16AndU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32AndU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwAnd { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::And, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I32AtomicRmw8OrU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + 
AtomicRMWBinOp::Or, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16OrU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Or, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwOr { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Or, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I64AtomicRmw8OrU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Or, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16OrU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Or, + effective_address, + narrow_value, + 
AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32OrU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Or, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwOr { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Or, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I32AtomicRmw8XorU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16XorU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + 
.builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwXor { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I64AtomicRmw8XorU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16XorU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32XorU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwXor { ref memarg } => { + 
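// Full-width variant: the operand is already i64, so unlike the + // 8/16/32-bit `*XorU` arms above there is no truncate/zext pair around + // the seqcst `atomicrmw xor`; the old value is pushed back unchanged. + 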
let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xor, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I32AtomicRmw8XchgU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16XchgU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwXchg { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I64AtomicRmw8XchgU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, 
self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16XchgU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32XchgU { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_value = + self.builder + .build_int_truncate(value, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + narrow_value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwXchg { ref memarg } => { + let value = self.state.pop1()?.into_int_value(); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_atomicrmw( + AtomicRMWBinOp::Xchg, + effective_address, + value, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + self.state.push1(old); + } + Operator::I32AtomicRmw8CmpxchgU { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_cmp = self 
+ .builder + .build_int_truncate(cmp, self.intrinsics.i8_ty, ""); + let narrow_new = self + .builder + .build_int_truncate(new, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_cmpxchg( + effective_address, + narrow_cmp, + narrow_new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_extract_value(old, 0, "") + .unwrap() + .into_int_value(); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmw16CmpxchgU { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_cmp = self + .builder + .build_int_truncate(cmp, self.intrinsics.i16_ty, ""); + let narrow_new = self + .builder + .build_int_truncate(new, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_cmpxchg( + effective_address, + narrow_cmp, + narrow_new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_extract_value(old, 0, "") + .unwrap() + .into_int_value(); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i32_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f32()); + } + Operator::I32AtomicRmwCmpxchg { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_cmpxchg( + effective_address, + cmp, + new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self.builder.build_extract_value(old, 0, "").unwrap(); + self.state.push1(old); + } + Operator::I64AtomicRmw8CmpxchgU { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i8_ptr_ty, + offset, + 1, + )?; 
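+ // Sub-word compare-and-swap: `cmp` and `new` are truncated to i8 before + // the seqcst/seqcst `cmpxchg`. The instruction returns an {old value, + // success flag} pair, so the old value is extracted at index 0 and + // zero-extended back to i64 before being pushed.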
+ self.trap_if_misaligned(memarg, effective_address); + let narrow_cmp = self + .builder + .build_int_truncate(cmp, self.intrinsics.i8_ty, ""); + let narrow_new = self + .builder + .build_int_truncate(new, self.intrinsics.i8_ty, ""); + let old = self + .builder + .build_cmpxchg( + effective_address, + narrow_cmp, + narrow_new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_extract_value(old, 0, "") + .unwrap() + .into_int_value(); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw16CmpxchgU { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i16_ptr_ty, + offset, + 2, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_cmp = self + .builder + .build_int_truncate(cmp, self.intrinsics.i16_ty, ""); + let narrow_new = self + .builder + .build_int_truncate(new, self.intrinsics.i16_ty, ""); + let old = self + .builder + .build_cmpxchg( + effective_address, + narrow_cmp, + narrow_new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_extract_value(old, 0, "") + .unwrap() + .into_int_value(); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmw32CmpxchgU { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i32_ptr_ty, + offset, + 4, + )?; + self.trap_if_misaligned(memarg, effective_address); + let narrow_cmp = self + .builder + .build_int_truncate(cmp, self.intrinsics.i32_ty, ""); + let narrow_new = self + .builder + .build_int_truncate(new, self.intrinsics.i32_ty, ""); + let old = self + .builder + .build_cmpxchg( + effective_address, + narrow_cmp, + narrow_new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self + .builder + .build_extract_value(old, 0, "") + .unwrap() + .into_int_value(); + let old = self + .builder + .build_int_z_extend(old, self.intrinsics.i64_ty, ""); + self.state.push1_extra(old, ExtraInfo::arithmetic_f64()); + } + Operator::I64AtomicRmwCmpxchg { ref memarg } => { + let ((cmp, cmp_info), (new, new_info)) = 
self.state.pop2_extra()?; + let cmp = self.apply_pending_canonicalization(cmp, cmp_info); + let new = self.apply_pending_canonicalization(new, new_info); + let (cmp, new) = (cmp.into_int_value(), new.into_int_value()); + let offset = self.state.pop1()?.into_int_value(); + let memory_index = MemoryIndex::from_u32(0); + let effective_address = self.resolve_memory_ptr( + memory_index, + memarg, + self.intrinsics.i64_ptr_ty, + offset, + 8, + )?; + self.trap_if_misaligned(memarg, effective_address); + let old = self + .builder + .build_cmpxchg( + effective_address, + cmp, + new, + AtomicOrdering::SequentiallyConsistent, + AtomicOrdering::SequentiallyConsistent, + ) + .unwrap(); + self.annotate_user_memaccess( + memory_index, + memarg, + 0, + old.as_instruction_value().unwrap(), + )?; + let old = self.builder.build_extract_value(old, 0, "").unwrap(); + self.state.push1(old); + } + + Operator::MemoryGrow { mem, mem_byte: _ } => { + let memory_index = MemoryIndex::from_u32(mem); + let delta = self.state.pop1()?; + let grow_fn_ptr = self.ctx.memory_grow(memory_index, self.intrinsics); + let callable_func = inkwell::values::CallableValue::try_from(grow_fn_ptr).unwrap(); + let grow = self.builder.build_call( + callable_func, + &[ + vmctx.as_basic_value_enum().into(), + delta.into(), + self.intrinsics.i32_ty.const_int(mem.into(), false).into(), + ], + "", + ); + self.state.push1(grow.try_as_basic_value().left().unwrap()); + } + Operator::MemorySize { mem, mem_byte: _ } => { + let memory_index = MemoryIndex::from_u32(mem); + let size_fn_ptr = self.ctx.memory_size(memory_index, self.intrinsics); + let callable_func = inkwell::values::CallableValue::try_from(size_fn_ptr).unwrap(); + let size = self.builder.build_call( + callable_func, + &[ + vmctx.as_basic_value_enum().into(), + self.intrinsics.i32_ty.const_int(mem.into(), false).into(), + ], + "", + ); + size.add_attribute(AttributeLoc::Function, self.intrinsics.readonly); + self.state.push1(size.try_as_basic_value().left().unwrap()); + } + Operator::MemoryInit { segment, mem } => { + let (dest, src, len) = self.state.pop3()?; + let mem = self.intrinsics.i32_ty.const_int(mem.into(), false); + let segment = self.intrinsics.i32_ty.const_int(segment.into(), false); + self.builder.build_call( + self.intrinsics.memory_init, + &[ + vmctx.as_basic_value_enum().into(), + mem.into(), + segment.into(), + dest.into(), + src.into(), + len.into(), + ], + "", + ); + } + Operator::DataDrop { segment } => { + let segment = self.intrinsics.i32_ty.const_int(segment.into(), false); + self.builder.build_call( + self.intrinsics.data_drop, + &[vmctx.as_basic_value_enum().into(), segment.into()], + "", + ); + } + Operator::MemoryCopy { src, dst } => { + // ignored until we support multiple memories + let _dst = dst; + let (memory_copy, src) = if let Some(local_memory_index) = self + .wasm_module + .local_memory_index(MemoryIndex::from_u32(src)) + { + (self.intrinsics.memory_copy, local_memory_index.as_u32()) + } else { + (self.intrinsics.imported_memory_copy, src) + }; + + let (dest_pos, src_pos, len) = self.state.pop3()?; + let src_index = self.intrinsics.i32_ty.const_int(src.into(), false); + self.builder.build_call( + memory_copy, + &[ + vmctx.as_basic_value_enum().into(), + src_index.into(), + dest_pos.into(), + src_pos.into(), + len.into(), + ], + "", + ); + } + Operator::MemoryFill { mem } => { + let (memory_fill, mem) = if let Some(local_memory_index) = self + .wasm_module + .local_memory_index(MemoryIndex::from_u32(mem)) + { + (self.intrinsics.memory_fill, 
local_memory_index.as_u32()) + } else { + (self.intrinsics.imported_memory_fill, mem) + }; + + let (dst, val, len) = self.state.pop3()?; + let mem_index = self.intrinsics.i32_ty.const_int(mem.into(), false); + self.builder.build_call( + memory_fill, + &[ + vmctx.as_basic_value_enum().into(), + mem_index.into(), + dst.into(), + val.into(), + len.into(), + ], + "", + ); + } + /*************************** + * Reference types. + * https://github.com/WebAssembly/reference-types/blob/master/proposals/reference-types/Overview.md + ***************************/ + Operator::RefNull { ty } => { + let ty = wptype_to_type(ty).map_err(to_compile_error)?; + let ty = type_to_llvm(self.intrinsics, ty)?; + self.state.push1(ty.const_zero()); + } + Operator::RefIsNull => { + let value = self.state.pop1()?.into_pointer_value(); + let is_null = self.builder.build_is_null(value, ""); + let is_null = self + .builder + .build_int_z_extend(is_null, self.intrinsics.i32_ty, ""); + self.state.push1(is_null); + } + Operator::RefFunc { function_index } => { + let index = self + .intrinsics + .i32_ty + .const_int(function_index.into(), false); + let value = self + .builder + .build_call( + self.intrinsics.func_ref, + &[self.ctx.basic().into(), index.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1(value); + } + Operator::TableGet { table } => { + let table_index = self.intrinsics.i32_ty.const_int(table.into(), false); + let elem = self.state.pop1()?; + let table_get = if let Some(_) = self + .wasm_module + .local_table_index(TableIndex::from_u32(table)) + { + self.intrinsics.table_get + } else { + self.intrinsics.imported_table_get + }; + let value = self + .builder + .build_call( + table_get, + &[self.ctx.basic().into(), table_index.into(), elem.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + let value = self.builder.build_bitcast( + value, + type_to_llvm( + self.intrinsics, + self.wasm_module + .tables + .get(TableIndex::from_u32(table)) + .unwrap() + .ty, + )?, + "", + ); + self.state.push1(value); + } + Operator::TableSet { table } => { + let table_index = self.intrinsics.i32_ty.const_int(table.into(), false); + let (elem, value) = self.state.pop2()?; + let value = self + .builder + .build_bitcast(value, self.intrinsics.anyref_ty, ""); + let table_set = if let Some(_) = self + .wasm_module + .local_table_index(TableIndex::from_u32(table)) + { + self.intrinsics.table_set + } else { + self.intrinsics.imported_table_set + }; + self.builder.build_call( + table_set, + &[ + self.ctx.basic().into(), + table_index.into(), + elem.into(), + value.into(), + ], + "", + ); + } + Operator::TableCopy { + dst_table, + src_table, + } => { + let (dst, src, len) = self.state.pop3()?; + let dst_table = self.intrinsics.i32_ty.const_int(dst_table as u64, false); + let src_table = self.intrinsics.i32_ty.const_int(src_table as u64, false); + self.builder.build_call( + self.intrinsics.table_copy, + &[ + self.ctx.basic().into(), + dst_table.into(), + src_table.into(), + dst.into(), + src.into(), + len.into(), + ], + "", + ); + } + Operator::TableInit { segment, table } => { + let (dst, src, len) = self.state.pop3()?; + let segment = self.intrinsics.i32_ty.const_int(segment as u64, false); + let table = self.intrinsics.i32_ty.const_int(table as u64, false); + self.builder.build_call( + self.intrinsics.table_init, + &[ + self.ctx.basic().into(), + table.into(), + segment.into(), + dst.into(), + src.into(), + len.into(), + ], + "", + ); + } + Operator::ElemDrop { segment } => { + let 
segment = self.intrinsics.i32_ty.const_int(segment as u64, false); + self.builder.build_call( + self.intrinsics.elem_drop, + &[self.ctx.basic().into(), segment.into()], + "", + ); + } + Operator::TableFill { table } => { + let table = self.intrinsics.i32_ty.const_int(table as u64, false); + let (start, elem, len) = self.state.pop3()?; + let elem = self + .builder + .build_bitcast(elem, self.intrinsics.anyref_ty, ""); + self.builder.build_call( + self.intrinsics.table_fill, + &[ + self.ctx.basic().into(), + table.into(), + start.into(), + elem.into(), + len.into(), + ], + "", + ); + } + Operator::TableGrow { table } => { + let (elem, delta) = self.state.pop2()?; + let elem = self + .builder + .build_bitcast(elem, self.intrinsics.anyref_ty, ""); + let (table_grow, table_index) = if let Some(local_table_index) = self + .wasm_module + .local_table_index(TableIndex::from_u32(table)) + { + (self.intrinsics.table_grow, local_table_index.as_u32()) + } else { + (self.intrinsics.imported_table_grow, table) + }; + let table_index = self.intrinsics.i32_ty.const_int(table_index as u64, false); + let size = self + .builder + .build_call( + table_grow, + &[ + self.ctx.basic().into(), + elem.into(), + delta.into(), + table_index.into(), + ], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1(size); + } + Operator::TableSize { table } => { + let (table_size, table_index) = if let Some(local_table_index) = self + .wasm_module + .local_table_index(TableIndex::from_u32(table)) + { + (self.intrinsics.table_size, local_table_index.as_u32()) + } else { + (self.intrinsics.imported_table_size, table) + }; + let table_index = self.intrinsics.i32_ty.const_int(table_index as u64, false); + let size = self + .builder + .build_call( + table_size, + &[self.ctx.basic().into(), table_index.into()], + "", + ) + .try_as_basic_value() + .left() + .unwrap(); + self.state.push1(size); + } + _ => { + return Err(CompileError::Codegen(format!( + "Operator {:?} unimplemented", + op + ))); + } + } + + Ok(()) + } +} + +fn is_f32_arithmetic(bits: u32) -> bool { + // Mask off sign bit. + let bits = bits & 0x7FFF_FFFF; + bits < 0x7FC0_0000 +} + +fn is_f64_arithmetic(bits: u64) -> bool { + // Mask off sign bit. + let bits = bits & 0x7FFF_FFFF_FFFF_FFFF; + bits < 0x7FF8_0000_0000_0000 +} + +// Constants for the bounds of truncation operations. These are the least or +// greatest exact floats in either f32 or f64 representation +// greater-than-or-equal-to (for least) or less-than-or-equal-to (for greatest) +// the i32 or i64 or u32 or u64 min (for least) or max (for greatest), when +// rounding towards zero. + +/// Least Exact Float (32 bits) greater-than-or-equal-to i32::MIN when rounding towards zero. +const LEF32_GEQ_I32_MIN: u64 = std::i32::MIN as u64; +/// Greatest Exact Float (32 bits) less-than-or-equal-to i32::MAX when rounding towards zero. +const GEF32_LEQ_I32_MAX: u64 = 2147483520; // bits as f32: 0x4eff_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to i32::MIN when rounding towards zero. +const LEF64_GEQ_I32_MIN: u64 = std::i32::MIN as u64; +/// Greatest Exact Float (64 bits) less-than-or-equal-to i32::MAX when rounding towards zero. +const GEF64_LEQ_I32_MAX: u64 = std::i32::MAX as u64; +/// Least Exact Float (32 bits) greater-than-or-equal-to u32::MIN when rounding towards zero. +const LEF32_GEQ_U32_MIN: u64 = std::u32::MIN as u64; +/// Greatest Exact Float (32 bits) less-than-or-equal-to u32::MAX when rounding towards zero. 
+const GEF32_LEQ_U32_MAX: u64 = 4294967040; // bits as f32: 0x4f7f_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to u32::MIN when rounding towards zero. +const LEF64_GEQ_U32_MIN: u64 = std::u32::MIN as u64; +/// Greatest Exact Float (64 bits) less-than-or-equal-to u32::MAX when rounding towards zero. +const GEF64_LEQ_U32_MAX: u64 = 4294967295; // bits as f64: 0x41ef_ffff_ffe0_0000 +/// Least Exact Float (32 bits) greater-than-or-equal-to i64::MIN when rounding towards zero. +const LEF32_GEQ_I64_MIN: u64 = std::i64::MIN as u64; +/// Greatest Exact Float (32 bits) less-than-or-equal-to i64::MAX when rounding towards zero. +const GEF32_LEQ_I64_MAX: u64 = 9223371487098961920; // bits as f32: 0x5eff_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to i64::MIN when rounding towards zero. +const LEF64_GEQ_I64_MIN: u64 = std::i64::MIN as u64; +/// Greatest Exact Float (64 bits) less-than-or-equal-to i64::MAX when rounding towards zero. +const GEF64_LEQ_I64_MAX: u64 = 9223372036854774784; // bits as f64: 0x43df_ffff_ffff_ffff +/// Least Exact Float (32 bits) greater-than-or-equal-to u64::MIN when rounding towards zero. +const LEF32_GEQ_U64_MIN: u64 = std::u64::MIN; +/// Greatest Exact Float (32 bits) less-than-or-equal-to u64::MAX when rounding towards zero. +const GEF32_LEQ_U64_MAX: u64 = 18446742974197923840; // bits as f32: 0x5f7f_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to u64::MIN when rounding towards zero. +const LEF64_GEQ_U64_MIN: u64 = std::u64::MIN; +/// Greatest Exact Float (64 bits) less-than-or-equal-to u64::MAX when rounding towards zero. +const GEF64_LEQ_U64_MAX: u64 = 18446744073709549568; // bits as f64: 0x43ef_ffff_ffff_ffff diff --git a/lib/compiler-llvm/src/translator/intrinsics.rs b/lib/compiler-llvm/src/translator/intrinsics.rs new file mode 100644 index 0000000000..400fe5cef5 --- /dev/null +++ b/lib/compiler-llvm/src/translator/intrinsics.rs @@ -0,0 +1,1750 @@ +//! Code for dealing with [LLVM][llvm-intrinsics] and VM intrinsics. +//! +//! VM intrinsics are used to interact with the host VM. +//! +//! 
[llvm-intrinsics]: https://llvm.org/docs/LangRef.html#intrinsic-functions + +use crate::abi::Abi; +use inkwell::values::BasicMetadataValueEnum; +use inkwell::{ + attributes::{Attribute, AttributeLoc}, + builder::Builder, + context::Context, + module::{Linkage, Module}, + targets::TargetData, + types::{ + BasicMetadataTypeEnum, BasicType, BasicTypeEnum, FloatType, IntType, PointerType, + StructType, VectorType, VoidType, + }, + values::{ + BasicValue, BasicValueEnum, FloatValue, FunctionValue, InstructionValue, IntValue, + PointerValue, VectorValue, + }, + AddressSpace, +}; +use std::collections::{hash_map::Entry, HashMap}; +use wasmer_compiler::CompileError; +use wasmer_types::entity::{EntityRef, PrimaryMap}; +use wasmer_types::{ + FunctionIndex, FunctionType as FuncType, GlobalIndex, LocalFunctionIndex, MemoryIndex, + ModuleInfo as WasmerCompilerModule, Mutability, SignatureIndex, TableIndex, Type, +}; +use wasmer_vm::{MemoryStyle, TrapCode, VMBuiltinFunctionIndex, VMOffsets}; + +pub fn type_to_llvm_ptr<'ctx>( + intrinsics: &Intrinsics<'ctx>, + ty: Type, +) -> Result<PointerType<'ctx>, CompileError> { + match ty { + Type::I32 => Ok(intrinsics.i32_ptr_ty), + Type::I64 => Ok(intrinsics.i64_ptr_ty), + Type::F32 => Ok(intrinsics.f32_ptr_ty), + Type::F64 => Ok(intrinsics.f64_ptr_ty), + Type::V128 => Ok(intrinsics.i128_ptr_ty), + Type::FuncRef => Ok(intrinsics.funcref_ty.ptr_type(AddressSpace::Generic)), + Type::ExternRef => Ok(intrinsics.externref_ty.ptr_type(AddressSpace::Generic)), + } +} + +pub fn type_to_llvm<'ctx>( + intrinsics: &Intrinsics<'ctx>, + ty: Type, +) -> Result<BasicTypeEnum<'ctx>, CompileError> { + match ty { + Type::I32 => Ok(intrinsics.i32_ty.as_basic_type_enum()), + Type::I64 => Ok(intrinsics.i64_ty.as_basic_type_enum()), + Type::F32 => Ok(intrinsics.f32_ty.as_basic_type_enum()), + Type::F64 => Ok(intrinsics.f64_ty.as_basic_type_enum()), + Type::V128 => Ok(intrinsics.i128_ty.as_basic_type_enum()), + Type::FuncRef => Ok(intrinsics.funcref_ty.as_basic_type_enum()), + Type::ExternRef => Ok(intrinsics.externref_ty.as_basic_type_enum()), + } +} + +/// Struct containing LLVM and VM intrinsics. 
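+/// +/// This covers the LLVM intrinsic declarations (e.g. `llvm.ctlz.*` and the constrained FP intrinsics), cached LLVM types and constants, and the `wasmer_vm_*` libcall and builtin-function declarations used to reach the host VM.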
+pub struct Intrinsics<'ctx> { + pub ctlz_i32: FunctionValue<'ctx>, + pub ctlz_i64: FunctionValue<'ctx>, + + pub cttz_i32: FunctionValue<'ctx>, + pub cttz_i64: FunctionValue<'ctx>, + + pub ctpop_i32: FunctionValue<'ctx>, + pub ctpop_i64: FunctionValue<'ctx>, + pub ctpop_i8x16: FunctionValue<'ctx>, + + pub fp_rounding_md: BasicMetadataValueEnum<'ctx>, + pub fp_exception_md: BasicMetadataValueEnum<'ctx>, + pub fp_ogt_md: BasicMetadataValueEnum<'ctx>, + pub fp_olt_md: BasicMetadataValueEnum<'ctx>, + pub fp_uno_md: BasicMetadataValueEnum<'ctx>, + + pub add_f32: FunctionValue<'ctx>, + pub add_f64: FunctionValue<'ctx>, + pub add_f32x4: FunctionValue<'ctx>, + pub add_f64x2: FunctionValue<'ctx>, + + pub sub_f32: FunctionValue<'ctx>, + pub sub_f64: FunctionValue<'ctx>, + pub sub_f32x4: FunctionValue<'ctx>, + pub sub_f64x2: FunctionValue<'ctx>, + + pub mul_f32: FunctionValue<'ctx>, + pub mul_f64: FunctionValue<'ctx>, + pub mul_f32x4: FunctionValue<'ctx>, + pub mul_f64x2: FunctionValue<'ctx>, + + pub div_f32: FunctionValue<'ctx>, + pub div_f64: FunctionValue<'ctx>, + pub div_f32x4: FunctionValue<'ctx>, + pub div_f64x2: FunctionValue<'ctx>, + + pub sqrt_f32: FunctionValue<'ctx>, + pub sqrt_f64: FunctionValue<'ctx>, + pub sqrt_f32x4: FunctionValue<'ctx>, + pub sqrt_f64x2: FunctionValue<'ctx>, + + pub cmp_f32: FunctionValue<'ctx>, + pub cmp_f64: FunctionValue<'ctx>, + pub cmp_f32x4: FunctionValue<'ctx>, + pub cmp_f64x2: FunctionValue<'ctx>, + + pub ceil_f32: FunctionValue<'ctx>, + pub ceil_f64: FunctionValue<'ctx>, + pub ceil_f32x4: FunctionValue<'ctx>, + pub ceil_f64x2: FunctionValue<'ctx>, + + pub floor_f32: FunctionValue<'ctx>, + pub floor_f64: FunctionValue<'ctx>, + pub floor_f32x4: FunctionValue<'ctx>, + pub floor_f64x2: FunctionValue<'ctx>, + + pub trunc_f32: FunctionValue<'ctx>, + pub trunc_f64: FunctionValue<'ctx>, + pub trunc_f32x4: FunctionValue<'ctx>, + pub trunc_f64x2: FunctionValue<'ctx>, + + pub fpext_f32: FunctionValue<'ctx>, + pub fptrunc_f64: FunctionValue<'ctx>, + + pub nearbyint_f32: FunctionValue<'ctx>, + pub nearbyint_f64: FunctionValue<'ctx>, + pub nearbyint_f32x4: FunctionValue<'ctx>, + pub nearbyint_f64x2: FunctionValue<'ctx>, + + pub fabs_f32: FunctionValue<'ctx>, + pub fabs_f64: FunctionValue<'ctx>, + pub fabs_f32x4: FunctionValue<'ctx>, + pub fabs_f64x2: FunctionValue<'ctx>, + + pub copysign_f32: FunctionValue<'ctx>, + pub copysign_f64: FunctionValue<'ctx>, + pub copysign_f32x4: FunctionValue<'ctx>, + pub copysign_f64x2: FunctionValue<'ctx>, + + pub sadd_sat_i8x16: FunctionValue<'ctx>, + pub sadd_sat_i16x8: FunctionValue<'ctx>, + pub uadd_sat_i8x16: FunctionValue<'ctx>, + pub uadd_sat_i16x8: FunctionValue<'ctx>, + + pub ssub_sat_i8x16: FunctionValue<'ctx>, + pub ssub_sat_i16x8: FunctionValue<'ctx>, + pub usub_sat_i8x16: FunctionValue<'ctx>, + pub usub_sat_i16x8: FunctionValue<'ctx>, + + pub expect_i1: FunctionValue<'ctx>, + pub trap: FunctionValue<'ctx>, + pub debug_trap: FunctionValue<'ctx>, + + pub personality: FunctionValue<'ctx>, + pub readonly: Attribute, + pub stack_probe: Attribute, + + pub void_ty: VoidType<'ctx>, + pub i1_ty: IntType<'ctx>, + pub i2_ty: IntType<'ctx>, + pub i4_ty: IntType<'ctx>, + pub i8_ty: IntType<'ctx>, + pub i16_ty: IntType<'ctx>, + pub i32_ty: IntType<'ctx>, + pub i64_ty: IntType<'ctx>, + pub i128_ty: IntType<'ctx>, + pub isize_ty: IntType<'ctx>, + pub f32_ty: FloatType<'ctx>, + pub f64_ty: FloatType<'ctx>, + + pub i1x128_ty: VectorType<'ctx>, + pub i8x16_ty: VectorType<'ctx>, + pub i16x8_ty: VectorType<'ctx>, + pub i32x4_ty: VectorType<'ctx>, + 
pub i64x2_ty: VectorType<'ctx>, + pub f32x4_ty: VectorType<'ctx>, + pub f64x2_ty: VectorType<'ctx>, + pub i32x8_ty: VectorType<'ctx>, + + pub i8_ptr_ty: PointerType<'ctx>, + pub i16_ptr_ty: PointerType<'ctx>, + pub i32_ptr_ty: PointerType<'ctx>, + pub i64_ptr_ty: PointerType<'ctx>, + pub i128_ptr_ty: PointerType<'ctx>, + pub isize_ptr_ty: PointerType<'ctx>, + pub f32_ptr_ty: PointerType<'ctx>, + pub f64_ptr_ty: PointerType<'ctx>, + + pub anyfunc_ty: StructType<'ctx>, + + pub funcref_ty: PointerType<'ctx>, + pub externref_ty: PointerType<'ctx>, + pub anyref_ty: PointerType<'ctx>, + + pub i1_zero: IntValue<'ctx>, + pub i8_zero: IntValue<'ctx>, + pub i32_zero: IntValue<'ctx>, + pub i64_zero: IntValue<'ctx>, + pub i128_zero: IntValue<'ctx>, + pub isize_zero: IntValue<'ctx>, + pub f32_zero: FloatValue<'ctx>, + pub f64_zero: FloatValue<'ctx>, + pub f32x4_zero: VectorValue<'ctx>, + pub f64x2_zero: VectorValue<'ctx>, + pub i32_consts: [IntValue<'ctx>; 16], + + pub trap_unreachable: BasicValueEnum<'ctx>, + pub trap_call_indirect_null: BasicValueEnum<'ctx>, + pub trap_call_indirect_sig: BasicValueEnum<'ctx>, + pub trap_memory_oob: BasicValueEnum<'ctx>, + pub trap_illegal_arithmetic: BasicValueEnum<'ctx>, + pub trap_integer_division_by_zero: BasicValueEnum<'ctx>, + pub trap_bad_conversion_to_integer: BasicValueEnum<'ctx>, + pub trap_unaligned_atomic: BasicValueEnum<'ctx>, + pub trap_table_access_oob: BasicValueEnum<'ctx>, + + pub experimental_stackmap: FunctionValue<'ctx>, + + // VM libcalls. + pub table_copy: FunctionValue<'ctx>, + pub table_init: FunctionValue<'ctx>, + pub table_fill: FunctionValue<'ctx>, + pub table_size: FunctionValue<'ctx>, + pub imported_table_size: FunctionValue<'ctx>, + pub table_get: FunctionValue<'ctx>, + pub imported_table_get: FunctionValue<'ctx>, + pub table_set: FunctionValue<'ctx>, + pub imported_table_set: FunctionValue<'ctx>, + pub table_grow: FunctionValue<'ctx>, + pub imported_table_grow: FunctionValue<'ctx>, + pub memory_init: FunctionValue<'ctx>, + pub data_drop: FunctionValue<'ctx>, + pub func_ref: FunctionValue<'ctx>, + pub elem_drop: FunctionValue<'ctx>, + pub memory_copy: FunctionValue<'ctx>, + pub imported_memory_copy: FunctionValue<'ctx>, + pub memory_fill: FunctionValue<'ctx>, + pub imported_memory_fill: FunctionValue<'ctx>, + + pub throw_trap: FunctionValue<'ctx>, + + // VM builtins. + pub vmfunction_import_ptr_ty: PointerType<'ctx>, + pub vmfunction_import_body_element: u32, + pub vmfunction_import_vmctx_element: u32, + + pub vmmemory_definition_ptr_ty: PointerType<'ctx>, + pub vmmemory_definition_base_element: u32, + pub vmmemory_definition_current_length_element: u32, + + pub memory32_grow_ptr_ty: PointerType<'ctx>, + pub imported_memory32_grow_ptr_ty: PointerType<'ctx>, + pub memory32_size_ptr_ty: PointerType<'ctx>, + pub imported_memory32_size_ptr_ty: PointerType<'ctx>, + + // Pointer to the VM. + pub ctx_ptr_ty: PointerType<'ctx>, +} + +impl<'ctx> Intrinsics<'ctx> { + /// Create an [`Intrinsics`] for the given [`Context`]. 
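+ /// + /// This declares every LLVM intrinsic and wasmer VM libcall on the given module up front, so later code generation can call them without re-declaring them.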
+ pub fn declare( + module: &Module<'ctx>, + context: &'ctx Context, + target_data: &TargetData, + ) -> Self { + let void_ty = context.void_type(); + let i1_ty = context.bool_type(); + let i2_ty = context.custom_width_int_type(2); + let i4_ty = context.custom_width_int_type(4); + let i8_ty = context.i8_type(); + let i16_ty = context.i16_type(); + let i32_ty = context.i32_type(); + let i64_ty = context.i64_type(); + let i128_ty = context.i128_type(); + let isize_ty = context.ptr_sized_int_type(target_data, None); + let f32_ty = context.f32_type(); + let f64_ty = context.f64_type(); + + let i1x4_ty = i1_ty.vec_type(4); + let i1x2_ty = i1_ty.vec_type(2); + let i1x128_ty = i1_ty.vec_type(128); + let i8x16_ty = i8_ty.vec_type(16); + let i16x8_ty = i16_ty.vec_type(8); + let i32x4_ty = i32_ty.vec_type(4); + let i64x2_ty = i64_ty.vec_type(2); + let f32x4_ty = f32_ty.vec_type(4); + let f64x2_ty = f64_ty.vec_type(2); + let i32x8_ty = i32_ty.vec_type(8); + + let i8_ptr_ty = i8_ty.ptr_type(AddressSpace::Generic); + let i16_ptr_ty = i16_ty.ptr_type(AddressSpace::Generic); + let i32_ptr_ty = i32_ty.ptr_type(AddressSpace::Generic); + let i64_ptr_ty = i64_ty.ptr_type(AddressSpace::Generic); + let i128_ptr_ty = i128_ty.ptr_type(AddressSpace::Generic); + let isize_ptr_ty = isize_ty.ptr_type(AddressSpace::Generic); + let f32_ptr_ty = f32_ty.ptr_type(AddressSpace::Generic); + let f64_ptr_ty = f64_ty.ptr_type(AddressSpace::Generic); + + let i1_zero = i1_ty.const_int(0, false); + let i8_zero = i8_ty.const_int(0, false); + let i32_zero = i32_ty.const_int(0, false); + let i64_zero = i64_ty.const_int(0, false); + let i128_zero = i128_ty.const_int(0, false); + let isize_zero = isize_ty.const_int(0, false); + let f32_zero = f32_ty.const_float(0.0); + let f64_zero = f64_ty.const_float(0.0); + let f32x4_zero = f32x4_ty.const_zero(); + let f64x2_zero = f64x2_ty.const_zero(); + let i32_consts = [ + i32_ty.const_int(0, false), + i32_ty.const_int(1, false), + i32_ty.const_int(2, false), + i32_ty.const_int(3, false), + i32_ty.const_int(4, false), + i32_ty.const_int(5, false), + i32_ty.const_int(6, false), + i32_ty.const_int(7, false), + i32_ty.const_int(8, false), + i32_ty.const_int(9, false), + i32_ty.const_int(10, false), + i32_ty.const_int(11, false), + i32_ty.const_int(12, false), + i32_ty.const_int(13, false), + i32_ty.const_int(14, false), + i32_ty.const_int(15, false), + ]; + + let md_ty = context.metadata_type(); + + let i8_ptr_ty_basic = i8_ptr_ty.as_basic_type_enum(); + + let i1_ty_basic_md: BasicMetadataTypeEnum = i1_ty.into(); + let i32_ty_basic_md: BasicMetadataTypeEnum = i32_ty.into(); + let i64_ty_basic_md: BasicMetadataTypeEnum = i64_ty.into(); + let f32_ty_basic_md: BasicMetadataTypeEnum = f32_ty.into(); + let f64_ty_basic_md: BasicMetadataTypeEnum = f64_ty.into(); + let i8x16_ty_basic_md: BasicMetadataTypeEnum = i8x16_ty.into(); + let i16x8_ty_basic_md: BasicMetadataTypeEnum = i16x8_ty.into(); + let f32x4_ty_basic_md: BasicMetadataTypeEnum = f32x4_ty.into(); + let f64x2_ty_basic_md: BasicMetadataTypeEnum = f64x2_ty.into(); + let md_ty_basic_md: BasicMetadataTypeEnum = md_ty.into(); + + let ctx_ty = i8_ty; + let ctx_ptr_ty = ctx_ty.ptr_type(AddressSpace::Generic); + let ctx_ptr_ty_basic = ctx_ptr_ty.as_basic_type_enum(); + let ctx_ptr_ty_basic_md: BasicMetadataTypeEnum = ctx_ptr_ty.into(); + + let sigindex_ty = i32_ty; + + let anyfunc_ty = context.struct_type( + &[i8_ptr_ty_basic, sigindex_ty.into(), ctx_ptr_ty_basic], + false, + ); + let funcref_ty = anyfunc_ty.ptr_type(AddressSpace::Generic); + let 
externref_ty = funcref_ty; + let anyref_ty = i8_ptr_ty; + let anyref_ty_basic_md: BasicMetadataTypeEnum = anyref_ty.into(); + + let ret_i8x16_take_i8x16 = i8x16_ty.fn_type(&[i8x16_ty_basic_md], false); + let ret_i8x16_take_i8x16_i8x16 = + i8x16_ty.fn_type(&[i8x16_ty_basic_md, i8x16_ty_basic_md], false); + let ret_i16x8_take_i16x8_i16x8 = + i16x8_ty.fn_type(&[i16x8_ty_basic_md, i16x8_ty_basic_md], false); + + let ret_i32_take_i32_i1 = i32_ty.fn_type(&[i32_ty_basic_md, i1_ty_basic_md], false); + let ret_i64_take_i64_i1 = i64_ty.fn_type(&[i64_ty_basic_md, i1_ty_basic_md], false); + + let ret_i32_take_i32 = i32_ty.fn_type(&[i32_ty_basic_md], false); + let ret_i64_take_i64 = i64_ty.fn_type(&[i64_ty_basic_md], false); + + let ret_f32_take_f32 = f32_ty.fn_type(&[f32_ty_basic_md], false); + let ret_f64_take_f64 = f64_ty.fn_type(&[f64_ty_basic_md], false); + let ret_f32x4_take_f32x4 = f32x4_ty.fn_type(&[f32x4_ty_basic_md], false); + let ret_f64x2_take_f64x2 = f64x2_ty.fn_type(&[f64x2_ty_basic_md], false); + + let ret_f32_take_f32_f32 = f32_ty.fn_type(&[f32_ty_basic_md, f32_ty_basic_md], false); + let ret_f64_take_f64_f64 = f64_ty.fn_type(&[f64_ty_basic_md, f64_ty_basic_md], false); + let ret_f32x4_take_f32x4_f32x4 = + f32x4_ty.fn_type(&[f32x4_ty_basic_md, f32x4_ty_basic_md], false); + let ret_f64x2_take_f64x2_f64x2 = + f64x2_ty.fn_type(&[f64x2_ty_basic_md, f64x2_ty_basic_md], false); + + let ret_f64_take_f32_md = f64_ty.fn_type(&[f32_ty_basic_md, md_ty_basic_md], false); + let ret_f32_take_f64_md_md = + f32_ty.fn_type(&[f64_ty_basic_md, md_ty_basic_md, md_ty_basic_md], false); + + let ret_i1_take_i1_i1 = i1_ty.fn_type(&[i1_ty_basic_md, i1_ty_basic_md], false); + + let ret_i1_take_f32_f32_md_md = i1_ty.fn_type( + &[ + f32_ty_basic_md, + f32_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + let ret_i1_take_f64_f64_md_md = i1_ty.fn_type( + &[ + f64_ty_basic_md, + f64_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + let ret_i1x4_take_f32x4_f32x4_md_md = i1x4_ty.fn_type( + &[ + f32x4_ty_basic_md, + f32x4_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + let ret_i1x2_take_f64x2_f64x2_md_md = i1x2_ty.fn_type( + &[ + f64x2_ty_basic_md, + f64x2_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + + let ret_f32_take_f32_f32_md_md = f32_ty.fn_type( + &[ + f32_ty_basic_md, + f32_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + let ret_f64_take_f64_f64_md_md = f64_ty.fn_type( + &[ + f64_ty_basic_md, + f64_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + let ret_f32x4_take_f32x4_f32x4_md_md = f32x4_ty.fn_type( + &[ + f32x4_ty_basic_md, + f32x4_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + let ret_f64x2_take_f64x2_f64x2_md_md = f64x2_ty.fn_type( + &[ + f64x2_ty_basic_md, + f64x2_ty_basic_md, + md_ty_basic_md, + md_ty_basic_md, + ], + false, + ); + + let intrinsics = Self { + ctlz_i32: module.add_function("llvm.ctlz.i32", ret_i32_take_i32_i1, None), + ctlz_i64: module.add_function("llvm.ctlz.i64", ret_i64_take_i64_i1, None), + + cttz_i32: module.add_function("llvm.cttz.i32", ret_i32_take_i32_i1, None), + cttz_i64: module.add_function("llvm.cttz.i64", ret_i64_take_i64_i1, None), + + ctpop_i32: module.add_function("llvm.ctpop.i32", ret_i32_take_i32, None), + ctpop_i64: module.add_function("llvm.ctpop.i64", ret_i64_take_i64, None), + ctpop_i8x16: module.add_function("llvm.ctpop.v16i8", ret_i8x16_take_i8x16, None), + + fp_rounding_md: 
context.metadata_string("round.tonearest").into(), + fp_exception_md: context.metadata_string("fpexcept.strict").into(), + + fp_ogt_md: context.metadata_string("ogt").into(), + fp_olt_md: context.metadata_string("olt").into(), + fp_uno_md: context.metadata_string("uno").into(), + + sqrt_f32: module.add_function("llvm.sqrt.f32", ret_f32_take_f32, None), + sqrt_f64: module.add_function("llvm.sqrt.f64", ret_f64_take_f64, None), + sqrt_f32x4: module.add_function("llvm.sqrt.v4f32", ret_f32x4_take_f32x4, None), + sqrt_f64x2: module.add_function("llvm.sqrt.v2f64", ret_f64x2_take_f64x2, None), + + ceil_f32: module.add_function("llvm.ceil.f32", ret_f32_take_f32, None), + ceil_f64: module.add_function("llvm.ceil.f64", ret_f64_take_f64, None), + ceil_f32x4: module.add_function("llvm.ceil.v4f32", ret_f32x4_take_f32x4, None), + ceil_f64x2: module.add_function("llvm.ceil.v2f64", ret_f64x2_take_f64x2, None), + + floor_f32: module.add_function("llvm.floor.f32", ret_f32_take_f32, None), + floor_f64: module.add_function("llvm.floor.f64", ret_f64_take_f64, None), + floor_f32x4: module.add_function("llvm.floor.v4f32", ret_f32x4_take_f32x4, None), + floor_f64x2: module.add_function("llvm.floor.v2f64", ret_f64x2_take_f64x2, None), + + trunc_f32: module.add_function("llvm.trunc.f32", ret_f32_take_f32, None), + trunc_f64: module.add_function("llvm.trunc.f64", ret_f64_take_f64, None), + trunc_f32x4: module.add_function("llvm.trunc.v4f32", ret_f32x4_take_f32x4, None), + trunc_f64x2: module.add_function("llvm.trunc.v2f64", ret_f64x2_take_f64x2, None), + + nearbyint_f32: module.add_function("llvm.nearbyint.f32", ret_f32_take_f32, None), + nearbyint_f64: module.add_function("llvm.nearbyint.f64", ret_f64_take_f64, None), + nearbyint_f32x4: module.add_function( + "llvm.nearbyint.v4f32", + ret_f32x4_take_f32x4, + None, + ), + nearbyint_f64x2: module.add_function( + "llvm.nearbyint.v2f64", + ret_f64x2_take_f64x2, + None, + ), + + add_f32: module.add_function( + "llvm.experimental.constrained.fadd.f32", + ret_f32_take_f32_f32_md_md, + None, + ), + add_f64: module.add_function( + "llvm.experimental.constrained.fadd.f64", + ret_f64_take_f64_f64_md_md, + None, + ), + add_f32x4: module.add_function( + "llvm.experimental.constrained.fadd.v4f32", + ret_f32x4_take_f32x4_f32x4_md_md, + None, + ), + add_f64x2: module.add_function( + "llvm.experimental.constrained.fadd.v2f64", + ret_f64x2_take_f64x2_f64x2_md_md, + None, + ), + + sub_f32: module.add_function( + "llvm.experimental.constrained.fsub.f32", + ret_f32_take_f32_f32_md_md, + None, + ), + sub_f64: module.add_function( + "llvm.experimental.constrained.fsub.f64", + ret_f64_take_f64_f64_md_md, + None, + ), + sub_f32x4: module.add_function( + "llvm.experimental.constrained.fsub.v4f32", + ret_f32x4_take_f32x4_f32x4_md_md, + None, + ), + sub_f64x2: module.add_function( + "llvm.experimental.constrained.fsub.v2f64", + ret_f64x2_take_f64x2_f64x2_md_md, + None, + ), + + mul_f32: module.add_function( + "llvm.experimental.constrained.fmul.f32", + ret_f32_take_f32_f32_md_md, + None, + ), + mul_f64: module.add_function( + "llvm.experimental.constrained.fmul.f64", + ret_f64_take_f64_f64_md_md, + None, + ), + mul_f32x4: module.add_function( + "llvm.experimental.constrained.fmul.v4f32", + ret_f32x4_take_f32x4_f32x4_md_md, + None, + ), + mul_f64x2: module.add_function( + "llvm.experimental.constrained.fmul.v2f64", + ret_f64x2_take_f64x2_f64x2_md_md, + None, + ), + + div_f32: module.add_function( + "llvm.experimental.constrained.fdiv.f32", + ret_f32_take_f32_f32_md_md, + None, + ), + div_f64: 
module.add_function( + "llvm.experimental.constrained.fdiv.f64", + ret_f64_take_f64_f64_md_md, + None, + ), + div_f32x4: module.add_function( + "llvm.experimental.constrained.fdiv.v4f32", + ret_f32x4_take_f32x4_f32x4_md_md, + None, + ), + div_f64x2: module.add_function( + "llvm.experimental.constrained.fdiv.v2f64", + ret_f64x2_take_f64x2_f64x2_md_md, + None, + ), + + cmp_f32: module.add_function( + "llvm.experimental.constrained.fcmp.f32", + ret_i1_take_f32_f32_md_md, + None, + ), + cmp_f64: module.add_function( + "llvm.experimental.constrained.fcmp.f64", + ret_i1_take_f64_f64_md_md, + None, + ), + cmp_f32x4: module.add_function( + "llvm.experimental.constrained.fcmp.v4f32", + ret_i1x4_take_f32x4_f32x4_md_md, + None, + ), + cmp_f64x2: module.add_function( + "llvm.experimental.constrained.fcmp.v2f64", + ret_i1x2_take_f64x2_f64x2_md_md, + None, + ), + + fpext_f32: module.add_function( + "llvm.experimental.constrained.fpext.f64.f32", + ret_f64_take_f32_md, + None, + ), + fptrunc_f64: module.add_function( + "llvm.experimental.constrained.fptrunc.f32.f64", + ret_f32_take_f64_md_md, + None, + ), + + fabs_f32: module.add_function("llvm.fabs.f32", ret_f32_take_f32, None), + fabs_f64: module.add_function("llvm.fabs.f64", ret_f64_take_f64, None), + fabs_f32x4: module.add_function("llvm.fabs.v4f32", ret_f32x4_take_f32x4, None), + fabs_f64x2: module.add_function("llvm.fabs.v2f64", ret_f64x2_take_f64x2, None), + + copysign_f32: module.add_function("llvm.copysign.f32", ret_f32_take_f32_f32, None), + copysign_f64: module.add_function("llvm.copysign.f64", ret_f64_take_f64_f64, None), + copysign_f32x4: module.add_function( + "llvm.copysign.v4f32", + ret_f32x4_take_f32x4_f32x4, + None, + ), + copysign_f64x2: module.add_function( + "llvm.copysign.v2f64", + ret_f64x2_take_f64x2_f64x2, + None, + ), + + sadd_sat_i8x16: module.add_function( + "llvm.sadd.sat.v16i8", + ret_i8x16_take_i8x16_i8x16, + None, + ), + sadd_sat_i16x8: module.add_function( + "llvm.sadd.sat.v8i16", + ret_i16x8_take_i16x8_i16x8, + None, + ), + uadd_sat_i8x16: module.add_function( + "llvm.uadd.sat.v16i8", + ret_i8x16_take_i8x16_i8x16, + None, + ), + uadd_sat_i16x8: module.add_function( + "llvm.uadd.sat.v8i16", + ret_i16x8_take_i16x8_i16x8, + None, + ), + + ssub_sat_i8x16: module.add_function( + "llvm.ssub.sat.v16i8", + ret_i8x16_take_i8x16_i8x16, + None, + ), + ssub_sat_i16x8: module.add_function( + "llvm.ssub.sat.v8i16", + ret_i16x8_take_i16x8_i16x8, + None, + ), + usub_sat_i8x16: module.add_function( + "llvm.usub.sat.v16i8", + ret_i8x16_take_i8x16_i8x16, + None, + ), + usub_sat_i16x8: module.add_function( + "llvm.usub.sat.v8i16", + ret_i16x8_take_i16x8_i16x8, + None, + ), + + expect_i1: module.add_function("llvm.expect.i1", ret_i1_take_i1_i1, None), + trap: module.add_function("llvm.trap", void_ty.fn_type(&[], false), None), + debug_trap: module.add_function("llvm.debugtrap", void_ty.fn_type(&[], false), None), + personality: module.add_function( + "__gxx_personality_v0", + i32_ty.fn_type(&[], false), + Some(Linkage::External), + ), + readonly: context + .create_enum_attribute(Attribute::get_named_enum_kind_id("readonly"), 0), + stack_probe: context.create_string_attribute("probe-stack", "wasmer_vm_probestack"), + + void_ty, + i1_ty, + i2_ty, + i4_ty, + i8_ty, + i16_ty, + i32_ty, + i64_ty, + i128_ty, + isize_ty, + f32_ty, + f64_ty, + + i1x128_ty, + i8x16_ty, + i16x8_ty, + i32x4_ty, + i64x2_ty, + f32x4_ty, + f64x2_ty, + i32x8_ty, + + i8_ptr_ty, + i16_ptr_ty, + i32_ptr_ty, + i64_ptr_ty, + i128_ptr_ty, + isize_ptr_ty, + f32_ptr_ty, + 
f64_ptr_ty, + + anyfunc_ty, + + funcref_ty, + externref_ty, + anyref_ty, + + i1_zero, + i8_zero, + i32_zero, + i64_zero, + i128_zero, + isize_zero, + f32_zero, + f64_zero, + f32x4_zero, + f64x2_zero, + i32_consts, + + trap_unreachable: i32_ty + .const_int(TrapCode::UnreachableCodeReached as _, false) + .as_basic_value_enum(), + trap_call_indirect_null: i32_ty + .const_int(TrapCode::IndirectCallToNull as _, false) + .as_basic_value_enum(), + trap_call_indirect_sig: i32_ty + .const_int(TrapCode::BadSignature as _, false) + .as_basic_value_enum(), + trap_memory_oob: i32_ty + .const_int(TrapCode::HeapAccessOutOfBounds as _, false) + .as_basic_value_enum(), + trap_illegal_arithmetic: i32_ty + .const_int(TrapCode::IntegerOverflow as _, false) + .as_basic_value_enum(), + trap_integer_division_by_zero: i32_ty + .const_int(TrapCode::IntegerDivisionByZero as _, false) + .as_basic_value_enum(), + trap_bad_conversion_to_integer: i32_ty + .const_int(TrapCode::BadConversionToInteger as _, false) + .as_basic_value_enum(), + trap_unaligned_atomic: i32_ty + .const_int(TrapCode::UnalignedAtomic as _, false) + .as_basic_value_enum(), + trap_table_access_oob: i32_ty + .const_int(TrapCode::TableAccessOutOfBounds as _, false) + .as_basic_value_enum(), + + experimental_stackmap: module.add_function( + "llvm.experimental.stackmap", + void_ty.fn_type( + &[ + i64_ty_basic_md, /* id */ + i32_ty_basic_md, /* numShadowBytes */ + ], + true, + ), + None, + ), + + // VM libcalls. + table_copy: module.add_function( + "wasmer_vm_table_copy", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + table_init: module.add_function( + "wasmer_vm_table_init", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + table_fill: module.add_function( + "wasmer_vm_table_fill", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + anyref_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + table_size: module.add_function( + "wasmer_vm_table_size", + i32_ty.fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false), + None, + ), + imported_table_size: module.add_function( + "wasmer_vm_imported_table_size", + i32_ty.fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false), + None, + ), + table_get: module.add_function( + "wasmer_vm_table_get", + anyref_ty.fn_type( + &[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md], + false, + ), + None, + ), + imported_table_get: module.add_function( + "wasmer_vm_imported_table_get", + anyref_ty.fn_type( + &[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md], + false, + ), + None, + ), + table_set: module.add_function( + "wasmer_vm_table_set", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + anyref_ty_basic_md, + ], + false, + ), + None, + ), + imported_table_set: module.add_function( + "wasmer_vm_imported_table_set", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + anyref_ty_basic_md, + ], + false, + ), + None, + ), + table_grow: module.add_function( + "wasmer_vm_table_grow", + i32_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + anyref_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + imported_table_grow: module.add_function( + "wasmer_vm_imported_table_grow", + i32_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + 
anyref_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + memory_init: module.add_function( + "wasmer_vm_memory32_init", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + memory_copy: module.add_function( + "wasmer_vm_memory32_copy", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + imported_memory_copy: module.add_function( + "wasmer_vm_imported_memory32_copy", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + memory_fill: module.add_function( + "wasmer_vm_memory32_fill", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + imported_memory_fill: module.add_function( + "wasmer_vm_imported_memory32_fill", + void_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + ], + false, + ), + None, + ), + data_drop: module.add_function( + "wasmer_vm_data_drop", + void_ty.fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false), + None, + ), + func_ref: module.add_function( + "wasmer_vm_func_ref", + funcref_ty.fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false), + None, + ), + elem_drop: module.add_function( + "wasmer_vm_elem_drop", + void_ty.fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false), + None, + ), + throw_trap: module.add_function( + "wasmer_vm_raise_trap", + void_ty.fn_type(&[i32_ty_basic_md], false), + None, + ), + + vmfunction_import_ptr_ty: context + .struct_type(&[i8_ptr_ty_basic, i8_ptr_ty_basic], false) + .ptr_type(AddressSpace::Generic), + vmfunction_import_body_element: 0, + vmfunction_import_vmctx_element: 1, + + vmmemory_definition_ptr_ty: context + .struct_type(&[i8_ptr_ty_basic, isize_ty.into()], false) + .ptr_type(AddressSpace::Generic), + vmmemory_definition_base_element: 0, + vmmemory_definition_current_length_element: 1, + + memory32_grow_ptr_ty: i32_ty + .fn_type( + &[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md], + false, + ) + .ptr_type(AddressSpace::Generic), + imported_memory32_grow_ptr_ty: i32_ty + .fn_type( + &[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md], + false, + ) + .ptr_type(AddressSpace::Generic), + memory32_size_ptr_ty: i32_ty + .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false) + .ptr_type(AddressSpace::Generic), + imported_memory32_size_ptr_ty: i32_ty + .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false) + .ptr_type(AddressSpace::Generic), + + ctx_ptr_ty, + }; + + let noreturn = + context.create_enum_attribute(Attribute::get_named_enum_kind_id("noreturn"), 0); + intrinsics + .throw_trap + .add_attribute(AttributeLoc::Function, noreturn); + intrinsics + .func_ref + .add_attribute(AttributeLoc::Function, intrinsics.readonly); + + intrinsics + } +} + +#[derive(Clone, Copy)] +pub enum MemoryCache<'ctx> { + /// The memory moves around. + Dynamic { + ptr_to_base_ptr: PointerValue<'ctx>, + ptr_to_current_length: PointerValue<'ctx>, + }, + /// The memory is always in the same place. 
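+ /// The base pointer is loaded once when the cache entry is built and reused for every access.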
+ Static { base_ptr: PointerValue<'ctx> }, +} + +struct TableCache<'ctx> { + ptr_to_base_ptr: PointerValue<'ctx>, + ptr_to_bounds: PointerValue<'ctx>, +} + +#[derive(Clone, Copy)] +pub enum GlobalCache<'ctx> { + Mut { ptr_to_value: PointerValue<'ctx> }, + Const { value: BasicValueEnum<'ctx> }, +} + +#[derive(Clone)] +pub struct FunctionCache<'ctx> { + pub func: PointerValue<'ctx>, + pub vmctx: BasicValueEnum<'ctx>, + pub attrs: Vec<(Attribute, AttributeLoc)>, +} + +pub struct CtxType<'ctx, 'a> { + ctx_ptr_value: PointerValue<'ctx>, + + wasm_module: &'a WasmerCompilerModule, + cache_builder: &'a Builder<'ctx>, + abi: &'a dyn Abi, + + cached_memories: HashMap<MemoryIndex, MemoryCache<'ctx>>, + cached_tables: HashMap<TableIndex, TableCache<'ctx>>, + cached_sigindices: HashMap<SignatureIndex, IntValue<'ctx>>, + cached_globals: HashMap<GlobalIndex, GlobalCache<'ctx>>, + cached_functions: HashMap<FunctionIndex, FunctionCache<'ctx>>, + cached_memory_grow: HashMap<MemoryIndex, PointerValue<'ctx>>, + cached_memory_size: HashMap<MemoryIndex, PointerValue<'ctx>>, + + offsets: VMOffsets, +} + +impl<'ctx, 'a> CtxType<'ctx, 'a> { + pub fn new( + wasm_module: &'a WasmerCompilerModule, + func_value: &FunctionValue<'ctx>, + cache_builder: &'a Builder<'ctx>, + abi: &'a dyn Abi, + ) -> CtxType<'ctx, 'a> { + CtxType { + ctx_ptr_value: abi.get_vmctx_ptr_param(func_value), + + wasm_module, + cache_builder, + abi, + + cached_memories: HashMap::new(), + cached_tables: HashMap::new(), + cached_sigindices: HashMap::new(), + cached_globals: HashMap::new(), + cached_functions: HashMap::new(), + cached_memory_grow: HashMap::new(), + cached_memory_size: HashMap::new(), + + // TODO: pointer width + offsets: VMOffsets::new(8).with_module_info(&wasm_module), + } + } + + pub fn basic(&self) -> BasicValueEnum<'ctx> { + self.ctx_ptr_value.as_basic_value_enum() + } + + pub fn memory( + &mut self, + index: MemoryIndex, + intrinsics: &Intrinsics<'ctx>, + module: &Module<'ctx>, + memory_styles: &PrimaryMap<MemoryIndex, MemoryStyle>, + ) -> MemoryCache<'ctx> { + let (cached_memories, wasm_module, ctx_ptr_value, cache_builder, offsets) = ( + &mut self.cached_memories, + self.wasm_module, + self.ctx_ptr_value, + &self.cache_builder, + &self.offsets, + ); + let memory_style = &memory_styles[index]; + *cached_memories.entry(index).or_insert_with(|| { + let memory_definition_ptr = + if let Some(local_memory_index) = wasm_module.local_memory_index(index) { + let offset = offsets.vmctx_vmmemory_definition(local_memory_index); + let offset = intrinsics.i32_ty.const_int(offset.into(), false); + unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") } + } else { + let offset = offsets.vmctx_vmmemory_import(index); + let offset = intrinsics.i32_ty.const_int(offset.into(), false); + let memory_definition_ptr_ptr = + unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") }; + let memory_definition_ptr_ptr = cache_builder + .build_bitcast( + memory_definition_ptr_ptr, + intrinsics.i8_ptr_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + let memory_definition_ptr = cache_builder + .build_load(memory_definition_ptr_ptr, "") + .into_pointer_value(); + tbaa_label( + module, + intrinsics, + format!("memory {} definition", index.as_u32()), + memory_definition_ptr.as_instruction_value().unwrap(), + ); + memory_definition_ptr + }; + let memory_definition_ptr = cache_builder + .build_bitcast( + memory_definition_ptr, + intrinsics.vmmemory_definition_ptr_ty, + "", + ) + .into_pointer_value(); + let base_ptr = cache_builder + .build_struct_gep( + memory_definition_ptr, + intrinsics.vmmemory_definition_base_element, + "", + ) + .unwrap(); + if let MemoryStyle::Dynamic { .. 
} = memory_style { + let current_length_ptr = cache_builder + .build_struct_gep( + memory_definition_ptr, + intrinsics.vmmemory_definition_current_length_element, + "", + ) + .unwrap(); + MemoryCache::Dynamic { + ptr_to_base_ptr: base_ptr, + ptr_to_current_length: current_length_ptr, + } + } else { + let base_ptr = cache_builder.build_load(base_ptr, "").into_pointer_value(); + tbaa_label( + module, + intrinsics, + format!("memory base_ptr {}", index.as_u32()), + base_ptr.as_instruction_value().unwrap(), + ); + MemoryCache::Static { base_ptr } + } + }) + } + + fn table_prepare( + &mut self, + table_index: TableIndex, + intrinsics: &Intrinsics<'ctx>, + module: &Module<'ctx>, + ) -> (PointerValue<'ctx>, PointerValue<'ctx>) { + let (cached_tables, wasm_module, ctx_ptr_value, cache_builder, offsets) = ( + &mut self.cached_tables, + self.wasm_module, + self.ctx_ptr_value, + &self.cache_builder, + &self.offsets, + ); + let TableCache { + ptr_to_base_ptr, + ptr_to_bounds, + } = *cached_tables.entry(table_index).or_insert_with(|| { + let (ptr_to_base_ptr, ptr_to_bounds) = + if let Some(local_table_index) = wasm_module.local_table_index(table_index) { + let offset = intrinsics.i64_ty.const_int( + offsets + .vmctx_vmtable_definition_base(local_table_index) + .into(), + false, + ); + let ptr_to_base_ptr = + unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") }; + let ptr_to_base_ptr = cache_builder + .build_bitcast( + ptr_to_base_ptr, + intrinsics.i8_ptr_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + let offset = intrinsics.i64_ty.const_int( + offsets + .vmctx_vmtable_definition_current_elements(local_table_index) + .into(), + false, + ); + let ptr_to_bounds = + unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") }; + let ptr_to_bounds = cache_builder + .build_bitcast(ptr_to_bounds, intrinsics.i32_ptr_ty, "") + .into_pointer_value(); + (ptr_to_base_ptr, ptr_to_bounds) + } else { + let offset = intrinsics.i64_ty.const_int( + offsets.vmctx_vmtable_import_definition(table_index).into(), + false, + ); + let definition_ptr_ptr = + unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") }; + let definition_ptr_ptr = cache_builder + .build_bitcast( + definition_ptr_ptr, + intrinsics.i8_ptr_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + let definition_ptr = cache_builder + .build_load(definition_ptr_ptr, "") + .into_pointer_value(); + tbaa_label( + module, + intrinsics, + format!("table {} definition", table_index.as_u32()), + definition_ptr.as_instruction_value().unwrap(), + ); + + let offset = intrinsics + .i64_ty + .const_int(offsets.vmtable_definition_base().into(), false); + let ptr_to_base_ptr = + unsafe { cache_builder.build_gep(definition_ptr, &[offset], "") }; + let ptr_to_base_ptr = cache_builder + .build_bitcast( + ptr_to_base_ptr, + intrinsics.i8_ptr_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + let offset = intrinsics + .i64_ty + .const_int(offsets.vmtable_definition_current_elements().into(), false); + let ptr_to_bounds = + unsafe { cache_builder.build_gep(definition_ptr, &[offset], "") }; + let ptr_to_bounds = cache_builder + .build_bitcast(ptr_to_bounds, intrinsics.i32_ptr_ty, "") + .into_pointer_value(); + (ptr_to_base_ptr, ptr_to_bounds) + }; + TableCache { + ptr_to_base_ptr, + ptr_to_bounds, + } + }); + + (ptr_to_base_ptr, ptr_to_bounds) + } + + pub fn table( + &mut self, + index: TableIndex, + intrinsics: &Intrinsics<'ctx>, + module: &Module<'ctx>, + ) -> (PointerValue<'ctx>, 
IntValue<'ctx>) { + let (ptr_to_base_ptr, ptr_to_bounds) = self.table_prepare(index, intrinsics, module); + let base_ptr = self + .cache_builder + .build_load(ptr_to_base_ptr, "base_ptr") + .into_pointer_value(); + let bounds = self + .cache_builder + .build_load(ptr_to_bounds, "bounds") + .into_int_value(); + tbaa_label( + module, + intrinsics, + format!("table_base_ptr {}", index.index()), + base_ptr.as_instruction_value().unwrap(), + ); + tbaa_label( + module, + intrinsics, + format!("table_bounds {}", index.index()), + bounds.as_instruction_value().unwrap(), + ); + (base_ptr, bounds) + } + + pub fn dynamic_sigindex( + &mut self, + index: SignatureIndex, + intrinsics: &Intrinsics<'ctx>, + module: &Module<'ctx>, + ) -> IntValue<'ctx> { + let (cached_sigindices, ctx_ptr_value, cache_builder, offsets) = ( + &mut self.cached_sigindices, + self.ctx_ptr_value, + &self.cache_builder, + &self.offsets, + ); + *cached_sigindices.entry(index).or_insert_with(|| { + let byte_offset = intrinsics + .i64_ty + .const_int(offsets.vmctx_vmshared_signature_id(index).into(), false); + let sigindex_ptr = unsafe { + cache_builder.build_gep(ctx_ptr_value, &[byte_offset], "dynamic_sigindex") + }; + let sigindex_ptr = cache_builder + .build_bitcast(sigindex_ptr, intrinsics.i32_ptr_ty, "") + .into_pointer_value(); + + let sigindex = cache_builder + .build_load(sigindex_ptr, "sigindex") + .into_int_value(); + tbaa_label( + module, + intrinsics, + format!("sigindex {}", index.as_u32()), + sigindex.as_instruction_value().unwrap(), + ); + sigindex + }) + } + + pub fn global( + &mut self, + index: GlobalIndex, + intrinsics: &Intrinsics<'ctx>, + module: &Module<'ctx>, + ) -> Result<&GlobalCache<'ctx>, CompileError> { + let (cached_globals, wasm_module, ctx_ptr_value, cache_builder, offsets) = ( + &mut self.cached_globals, + self.wasm_module, + self.ctx_ptr_value, + &self.cache_builder, + &self.offsets, + ); + Ok(match cached_globals.entry(index) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + let global_type = wasm_module.globals[index]; + let global_value_type = global_type.ty; + + let global_mutability = global_type.mutability; + let offset = if let Some(local_global_index) = wasm_module.local_global_index(index) + { + offsets.vmctx_vmglobal_definition(local_global_index) + } else { + offsets.vmctx_vmglobal_import(index) + }; + let offset = intrinsics.i32_ty.const_int(offset.into(), false); + let global_ptr = { + let global_ptr_ptr = + unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") }; + let global_ptr_ptr = cache_builder + .build_bitcast( + global_ptr_ptr, + intrinsics.i32_ptr_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + let global_ptr = cache_builder + .build_load(global_ptr_ptr, "") + .into_pointer_value(); + tbaa_label( + module, + intrinsics, + format!("global_ptr {}", index.as_u32()), + global_ptr.as_instruction_value().unwrap(), + ); + global_ptr + }; + let global_ptr = cache_builder + .build_bitcast( + global_ptr, + type_to_llvm_ptr(&intrinsics, global_value_type)?, + "", + ) + .into_pointer_value(); + + entry.insert(match global_mutability { + Mutability::Const => { + let value = cache_builder.build_load(global_ptr, ""); + tbaa_label( + module, + intrinsics, + format!("global {}", index.as_u32()), + value.as_instruction_value().unwrap(), + ); + GlobalCache::Const { value } + } + Mutability::Var => GlobalCache::Mut { + ptr_to_value: global_ptr, + }, + }) + } + }) + } + + pub fn add_func( + &mut self, + function_index: FunctionIndex, + 
func: PointerValue<'ctx>, + vmctx: BasicValueEnum<'ctx>, + attrs: &[(Attribute, AttributeLoc)], + ) { + match self.cached_functions.entry(function_index) { + Entry::Occupied(_) => unreachable!("duplicate function"), + Entry::Vacant(entry) => { + entry.insert(FunctionCache { + func, + vmctx, + attrs: attrs.to_vec(), + }); + } + } + } + + pub fn local_func( + &mut self, + _local_function_index: LocalFunctionIndex, + function_index: FunctionIndex, + intrinsics: &Intrinsics<'ctx>, + module: &Module<'ctx>, + context: &'ctx Context, + func_type: &FuncType, + function_name: &str, + ) -> Result<&FunctionCache<'ctx>, CompileError> { + let (cached_functions, ctx_ptr_value, offsets) = ( + &mut self.cached_functions, + &self.ctx_ptr_value, + &self.offsets, + ); + Ok(match cached_functions.entry(function_index) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + debug_assert!(module.get_function(function_name).is_none()); + let (llvm_func_type, llvm_func_attrs) = + self.abi + .func_type_to_llvm(context, intrinsics, Some(offsets), func_type)?; + let func = + module.add_function(function_name, llvm_func_type, Some(Linkage::External)); + for (attr, attr_loc) in &llvm_func_attrs { + func.add_attribute(*attr_loc, *attr); + } + entry.insert(FunctionCache { + func: func.as_global_value().as_pointer_value(), + vmctx: ctx_ptr_value.as_basic_value_enum(), + attrs: llvm_func_attrs, + }) + } + }) + } + + pub fn func( + &mut self, + function_index: FunctionIndex, + intrinsics: &Intrinsics<'ctx>, + context: &'ctx Context, + func_type: &FuncType, + ) -> Result<&FunctionCache<'ctx>, CompileError> { + let (cached_functions, wasm_module, ctx_ptr_value, cache_builder, offsets) = ( + &mut self.cached_functions, + self.wasm_module, + &self.ctx_ptr_value, + &self.cache_builder, + &self.offsets, + ); + Ok(match cached_functions.entry(function_index) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + let (llvm_func_type, llvm_func_attrs) = + self.abi + .func_type_to_llvm(context, intrinsics, Some(offsets), func_type)?; + debug_assert!(wasm_module.local_func_index(function_index).is_none()); + let offset = offsets.vmctx_vmfunction_import(function_index); + let offset = intrinsics.i32_ty.const_int(offset.into(), false); + let vmfunction_import_ptr = + unsafe { cache_builder.build_gep(*ctx_ptr_value, &[offset], "") }; + let vmfunction_import_ptr = cache_builder + .build_bitcast( + vmfunction_import_ptr, + intrinsics.vmfunction_import_ptr_ty, + "", + ) + .into_pointer_value(); + + let body_ptr_ptr = cache_builder + .build_struct_gep( + vmfunction_import_ptr, + intrinsics.vmfunction_import_body_element, + "", + ) + .unwrap(); + let body_ptr = cache_builder.build_load(body_ptr_ptr, ""); + let body_ptr = cache_builder + .build_bitcast(body_ptr, llvm_func_type.ptr_type(AddressSpace::Generic), "") + .into_pointer_value(); + let vmctx_ptr_ptr = cache_builder + .build_struct_gep( + vmfunction_import_ptr, + intrinsics.vmfunction_import_vmctx_element, + "", + ) + .unwrap(); + let vmctx_ptr = cache_builder.build_load(vmctx_ptr_ptr, ""); + entry.insert(FunctionCache { + func: body_ptr, + vmctx: vmctx_ptr, + attrs: llvm_func_attrs, + }) + } + }) + } + + pub fn memory_grow( + &mut self, + memory_index: MemoryIndex, + intrinsics: &Intrinsics<'ctx>, + ) -> PointerValue<'ctx> { + let (cached_memory_grow, wasm_module, offsets, cache_builder, ctx_ptr_value) = ( + &mut self.cached_memory_grow, + &self.wasm_module, + &self.offsets, + &self.cache_builder, + &self.ctx_ptr_value, + ); + 
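// The vmctx holds the builtin functions at fixed offsets; load the right grow function pointer once and cache it per memory index. +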
*cached_memory_grow.entry(memory_index).or_insert_with(|| { + let (grow_fn, grow_fn_ty) = if wasm_module.local_memory_index(memory_index).is_some() { + ( + VMBuiltinFunctionIndex::get_memory32_grow_index(), + intrinsics.memory32_grow_ptr_ty, + ) + } else { + ( + VMBuiltinFunctionIndex::get_imported_memory32_grow_index(), + intrinsics.imported_memory32_grow_ptr_ty, + ) + }; + let offset = offsets.vmctx_builtin_function(grow_fn); + let offset = intrinsics.i32_ty.const_int(offset.into(), false); + let grow_fn_ptr_ptr = unsafe { cache_builder.build_gep(*ctx_ptr_value, &[offset], "") }; + + let grow_fn_ptr_ptr = cache_builder + .build_bitcast( + grow_fn_ptr_ptr, + grow_fn_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + cache_builder + .build_load(grow_fn_ptr_ptr, "") + .into_pointer_value() + }) + } + + pub fn memory_size( + &mut self, + memory_index: MemoryIndex, + intrinsics: &Intrinsics<'ctx>, + ) -> PointerValue<'ctx> { + let (cached_memory_size, wasm_module, offsets, cache_builder, ctx_ptr_value) = ( + &mut self.cached_memory_size, + &self.wasm_module, + &self.offsets, + &self.cache_builder, + &self.ctx_ptr_value, + ); + *cached_memory_size.entry(memory_index).or_insert_with(|| { + let (size_fn, size_fn_ty) = if wasm_module.local_memory_index(memory_index).is_some() { + ( + VMBuiltinFunctionIndex::get_memory32_size_index(), + intrinsics.memory32_size_ptr_ty, + ) + } else { + ( + VMBuiltinFunctionIndex::get_imported_memory32_size_index(), + intrinsics.imported_memory32_size_ptr_ty, + ) + }; + let offset = offsets.vmctx_builtin_function(size_fn); + let offset = intrinsics.i32_ty.const_int(offset.into(), false); + let size_fn_ptr_ptr = unsafe { cache_builder.build_gep(*ctx_ptr_value, &[offset], "") }; + + let size_fn_ptr_ptr = cache_builder + .build_bitcast( + size_fn_ptr_ptr, + size_fn_ty.ptr_type(AddressSpace::Generic), + "", + ) + .into_pointer_value(); + + cache_builder + .build_load(size_fn_ptr_ptr, "") + .into_pointer_value() + }) + } + + pub fn get_offsets(&self) -> &VMOffsets { + &self.offsets + } +} + +// Given an instruction that operates on memory, mark the access as not aliasing +// other memory accesses which have a different label. +pub fn tbaa_label<'ctx>( + module: &Module<'ctx>, + intrinsics: &Intrinsics<'ctx>, + label: String, + instruction: InstructionValue<'ctx>, +) { + // To convey to LLVM that two pointers must be pointing to distinct memory, + // we use LLVM's Type Based Aliasing Analysis, or TBAA, to mark the memory + // operations as having different types whose pointers may not alias. + // + // See the LLVM documentation at + // https://llvm.org/docs/LangRef.html#tbaa-metadata + // + // LLVM TBAA supports many features, but we use it in a simple way, with + // only scalar types that are children of the root node. Every TBAA type we + // declare is NoAlias with the others. See NoAlias, PartialAlias, + // MayAlias and MustAlias in the LLVM documentation: + // https://llvm.org/docs/AliasAnalysis.html#must-may-and-no-alias-responses + + let context = module.get_context(); + + // TODO: ContextRef can't return us the lifetime from module through Deref. + // This could be fixed once generic_associated_types is stable. + let context = { + let context2 = &*context; + unsafe { std::mem::transmute::<&Context, &'ctx Context>(context2) } + }; + + // `!wasmer_tbaa_root = {}`, the TBAA root node for wasmer. 
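+ // Together with the type descriptor and access tag built below, the emitted metadata for a label like "local 0" ends up shaped like: + //   !wasmer_tbaa_root = !{} + //   !1 = !{!"local 0", !wasmer_tbaa_root}   (type descriptor) + //   !2 = !{!1, !1, i64 0}                   (access tag, registered as "local 0_memop") + // with !2 attached to the memory instruction as its !tbaa metadata.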
+ let tbaa_root = module + .get_global_metadata("wasmer_tbaa_root") + .pop() + .unwrap_or_else(|| { + module + .add_global_metadata("wasmer_tbaa_root", &context.metadata_node(&[])) + .unwrap(); + module.get_global_metadata("wasmer_tbaa_root")[0] + }); + + // Construct (or look up) the type descriptor, for example + // `!"local 0" = !{!"local 0", !wasmer_tbaa_root}`. + let type_label = context.metadata_string(label.as_str()); + let type_tbaa = module + .get_global_metadata(label.as_str()) + .pop() + .unwrap_or_else(|| { + module + .add_global_metadata( + label.as_str(), + &context.metadata_node(&[type_label.into(), tbaa_root.into()]), + ) + .unwrap(); + module.get_global_metadata(label.as_str())[0] + }); + + // Construct (or look up) the access tag, which is a struct of the form + // (base type, access type, offset). + // + // "If BaseTy is a scalar type, Offset must be 0 and BaseTy and AccessTy + // must be the same". + // -- https://llvm.org/docs/LangRef.html#tbaa-metadata + let label = label + "_memop"; + let type_tbaa = module + .get_global_metadata(label.as_str()) + .pop() + .unwrap_or_else(|| { + module + .add_global_metadata( + label.as_str(), + &context.metadata_node(&[ + type_tbaa.into(), + type_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ) + .unwrap(); + module.get_global_metadata(label.as_str())[0] + }); + + // Attach the access tag to the instruction. + let tbaa_kind = context.get_kind_id("tbaa"); + instruction.set_metadata(type_tbaa, tbaa_kind).unwrap(); +} diff --git a/lib/compiler-llvm/src/translator/mod.rs b/lib/compiler-llvm/src/translator/mod.rs new file mode 100644 index 0000000000..ac546687d3 --- /dev/null +++ b/lib/compiler-llvm/src/translator/mod.rs @@ -0,0 +1,6 @@ +mod code; +pub mod intrinsics; +//mod stackmap; +mod state; + +pub use self::code::FuncTranslator; diff --git a/lib/compiler-llvm/src/translator/stackmap.rs b/lib/compiler-llvm/src/translator/stackmap.rs new file mode 100644 index 0000000000..cb6abc48fb --- /dev/null +++ b/lib/compiler-llvm/src/translator/stackmap.rs @@ -0,0 +1,570 @@ +// https://llvm.org/docs/StackMaps.html#stackmap-section + +use byteorder::{LittleEndian, ReadBytesExt}; +use std::io::{self, Cursor}; +use wasmer_vm_core::vm::Ctx; +use wasmer_vm_core::{ + module::Module, + structures::TypedIndex, + types::{GlobalIndex, LocalOrImport, TableIndex}, +}; + +#[derive(Default, Debug, Clone)] +pub struct StackmapRegistry { + pub entries: Vec<StackmapEntry>, +} + +#[derive(Debug, Clone)] +pub struct StackmapEntry { + pub kind: StackmapEntryKind, + pub local_function_id: usize, + pub opcode_offset: usize, + pub value_semantics: Vec<ValueSemantic>, + pub local_count: usize, + pub stack_count: usize, + pub is_start: bool, +} + +#[derive(Debug, Clone)] +pub enum ValueSemantic { + WasmLocal(usize), + WasmStack(usize), + Ctx, + SignalMem, + PointerToMemoryBase, + PointerToMemoryBound, // 64-bit + MemoryBase, + MemoryBound, // 64-bit + PointerToGlobal(usize), + Global(usize), + PointerToTableBase, + PointerToTableBound, + ImportedFuncPointer(usize), + ImportedFuncCtx(usize), + DynamicSigindice(usize), +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum StackmapEntryKind { + FunctionHeader, + Loop, + Call, + Trappable, +} + +impl StackmapEntry { + #[cfg(all( + any(target_os = "freebsd", target_os = "linux", target_vendor = "apple"), + target_arch = "x86_64" + ))] + pub fn populate_msm( + &self, + module_info: &ModuleInfo, + code_addr: usize, + llvm_map: &StackMap, + size_record: &StkSizeRecord, + map_record: &StkMapRecord, + end: Option<(&StackmapEntry, &StkMapRecord)>, 
msm: &mut wasmer_vm_core::state::ModuleStateMap, + ) { + use std::collections::{BTreeMap, HashMap}; + use wasmer_vm_core::state::{ + x64::{new_machine_state, X64Register, GPR}, + FunctionStateMap, MachineStateDiff, MachineValue, OffsetInfo, RegisterIndex, + SuspendOffset, WasmAbstractValue, + }; + use wasmer_vm_core::vm; + + let func_base_addr = (size_record.function_address as usize) + .checked_sub(code_addr) + .unwrap(); + let target_offset = func_base_addr + map_record.instruction_offset as usize; + assert!(self.is_start); + + if msm.local_functions.len() == self.local_function_id { + assert_eq!(self.kind, StackmapEntryKind::FunctionHeader); + msm.local_functions.insert( + target_offset, + FunctionStateMap::new(new_machine_state(), self.local_function_id, 0, vec![]), + ); + } else if msm.local_functions.len() == self.local_function_id + 1 { + } else { + panic!("unordered local functions"); + } + + let (_, fsm) = msm.local_functions.iter_mut().last().unwrap(); + + assert_eq!(self.value_semantics.len(), map_record.locations.len()); + + // System V requires 16-byte alignment before each call instruction. + // Considering the saved rbp we need to ensure the stack size % 16 always equals 8. + assert!(size_record.stack_size % 16 == 8); + + // Layout begins just below saved rbp. (push rbp; mov rbp, rsp) + let mut machine_stack_half_layout: Vec<MachineValue> = + vec![MachineValue::Undefined; (size_record.stack_size - 8) as usize / 4]; + let mut regs: Vec<(RegisterIndex, MachineValue)> = vec![]; + let mut stack_constants: HashMap<usize, u64> = HashMap::new(); + + let mut prev_frame_diff: BTreeMap<usize, Option<MachineValue>> = BTreeMap::new(); + + let mut wasm_locals: Vec<WasmAbstractValue> = vec![]; + let mut wasm_stack: Vec<WasmAbstractValue> = vec![]; + + for (i, loc) in map_record.locations.iter().enumerate() { + let mv = match self.value_semantics[i] { + ValueSemantic::WasmLocal(x) => { + if x != wasm_locals.len() { + panic!("unordered local values"); + } + wasm_locals.push(WasmAbstractValue::Runtime); + MachineValue::WasmLocal(x) + } + ValueSemantic::WasmStack(x) => { + if x != wasm_stack.len() { + panic!("unordered stack values"); + } + wasm_stack.push(WasmAbstractValue::Runtime); + MachineValue::WasmStack(x) + } + ValueSemantic::Ctx => MachineValue::Vmctx, + ValueSemantic::SignalMem => { + MachineValue::VmctxDeref(vec![Ctx::offset_interrupt_signal_mem() as usize, 0]) + } + ValueSemantic::PointerToMemoryBase => { + MachineValue::VmctxDeref(vec![Ctx::offset_memory_base() as usize]) + } + ValueSemantic::PointerToMemoryBound => { + MachineValue::VmctxDeref(vec![Ctx::offset_memory_bound() as usize]) + } + ValueSemantic::MemoryBase => { + MachineValue::VmctxDeref(vec![Ctx::offset_memory_base() as usize, 0]) + } + ValueSemantic::MemoryBound => { + MachineValue::VmctxDeref(vec![Ctx::offset_memory_bound() as usize, 0]) + } + ValueSemantic::PointerToGlobal(idx) => { + MachineValue::VmctxDeref(deref_global(module_info, idx, false)) + } + ValueSemantic::Global(idx) => { + MachineValue::VmctxDeref(deref_global(module_info, idx, true)) + } + ValueSemantic::PointerToTableBase => { + MachineValue::VmctxDeref(deref_table_base(module_info, 0, false)) + } + ValueSemantic::PointerToTableBound => { + MachineValue::VmctxDeref(deref_table_bound(module_info, 0, false)) + } + ValueSemantic::ImportedFuncPointer(idx) => MachineValue::VmctxDeref(vec![ + Ctx::offset_imported_funcs() as usize, + vm::ImportedFunc::size() as usize * idx + + vm::ImportedFunc::offset_func() as usize, + 0, + ]), + ValueSemantic::ImportedFuncCtx(idx) => MachineValue::VmctxDeref(vec![ + Ctx::offset_imported_funcs() as usize, 
+ vm::ImportedFunc::size() as usize * idx + + vm::ImportedFunc::offset_func_ctx() as usize, + 0, + ]), + ValueSemantic::DynamicSigindice(idx) => { + MachineValue::VmctxDeref(vec![Ctx::offset_signatures() as usize, idx * 4, 0]) + } + }; + match loc.ty { + LocationType::Register => { + let index = X64Register::from_dwarf_regnum(loc.dwarf_regnum) + .expect("invalid regnum") + .to_index(); + regs.push((index, mv)); + } + LocationType::Constant => { + let v = loc.offset_or_small_constant as u32 as u64; + match mv { + MachineValue::WasmStack(x) => { + stack_constants.insert(x, v); + *wasm_stack.last_mut().unwrap() = WasmAbstractValue::Const(v); + } + _ => {} // TODO + } + } + LocationType::ConstantIndex => { + let v = + llvm_map.constants[loc.offset_or_small_constant as usize].large_constant; + match mv { + MachineValue::WasmStack(x) => { + stack_constants.insert(x, v); + *wasm_stack.last_mut().unwrap() = WasmAbstractValue::Const(v); + } + _ => {} // TODO + } + } + LocationType::Direct => match mv { + MachineValue::WasmLocal(_) => { + assert_eq!(loc.location_size, 8); // the pointer itself + assert!( + X64Register::from_dwarf_regnum(loc.dwarf_regnum).unwrap() + == X64Register::GPR(GPR::RBP) + ); + if loc.offset_or_small_constant >= 0 { + assert!(loc.offset_or_small_constant >= 16); // (saved_rbp, return_address) + assert!(loc.offset_or_small_constant % 8 == 0); + prev_frame_diff + .insert((loc.offset_or_small_constant as usize - 16) / 8, Some(mv)); + } else { + let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize; + assert!( + stack_offset > 0 && stack_offset <= machine_stack_half_layout.len() + ); + machine_stack_half_layout[stack_offset - 1] = mv; + } + } + _ => unreachable!( + "Direct location type is not expected for values other than local" + ), + }, + LocationType::Indirect => { + assert!(loc.offset_or_small_constant < 0); + assert!( + X64Register::from_dwarf_regnum(loc.dwarf_regnum).unwrap() + == X64Register::GPR(GPR::RBP) + ); + let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize; + assert!(stack_offset > 0 && stack_offset <= machine_stack_half_layout.len()); + machine_stack_half_layout[stack_offset - 1] = mv; + } + } + } + + assert_eq!(wasm_stack.len(), self.stack_count); + assert_eq!(wasm_locals.len(), self.local_count); + + let mut machine_stack_layout: Vec<MachineValue> = + Vec::with_capacity(machine_stack_half_layout.len() / 2); + + for i in 0..machine_stack_half_layout.len() / 2 { + let major = &machine_stack_half_layout[i * 2 + 1]; // mod 8 == 0 + let minor = &machine_stack_half_layout[i * 2]; // mod 8 == 4 + let only_major = match *minor { + MachineValue::Undefined => true, + _ => false, + }; + if only_major { + machine_stack_layout.push(major.clone()); + } else { + machine_stack_layout.push(MachineValue::TwoHalves(Box::new(( + major.clone(), + minor.clone(), + )))); + } + } + + let diff = MachineStateDiff { + last: None, + stack_push: machine_stack_layout, + stack_pop: 0, + prev_frame_diff, + reg_diff: regs, + wasm_stack_push: wasm_stack, + wasm_stack_pop: 0, + wasm_inst_offset: self.opcode_offset, + }; + let diff_id = fsm.diffs.len(); + fsm.diffs.push(diff); + + match self.kind { + StackmapEntryKind::FunctionHeader => { + fsm.locals = wasm_locals; + } + _ => { + assert_eq!(fsm.locals, wasm_locals); + } + } + + let end_offset = { + if let Some(end) = end { + let (end_entry, end_record) = end; + assert_eq!(end_entry.is_start, false); + assert_eq!(self.opcode_offset, end_entry.opcode_offset); + let end_offset = func_base_addr + end_record.instruction_offset as usize; 
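+ // The paired end record (same wasm opcode, is_start == false) gives the machine-code offset at which this stackmap region closes.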
+ assert!(end_offset >= target_offset); + end_offset + } else { + target_offset + 1 + } + }; + + match self.kind { + StackmapEntryKind::Loop => { + fsm.wasm_offset_to_target_offset + .insert(self.opcode_offset, SuspendOffset::Loop(target_offset)); + fsm.loop_offsets.insert( + target_offset, + OffsetInfo { + end_offset, + diff_id, + activate_offset: target_offset, + }, + ); + } + StackmapEntryKind::Call => { + fsm.wasm_offset_to_target_offset + .insert(self.opcode_offset, SuspendOffset::Call(target_offset)); + fsm.call_offsets.insert( + target_offset, + OffsetInfo { + end_offset: end_offset + 1, // The return address is just after 'call' instruction. Offset by one here. + diff_id, + activate_offset: target_offset, + }, + ); + } + StackmapEntryKind::Trappable => { + fsm.wasm_offset_to_target_offset + .insert(self.opcode_offset, SuspendOffset::Trappable(target_offset)); + fsm.trappable_offsets.insert( + target_offset, + OffsetInfo { + end_offset, + diff_id, + activate_offset: target_offset, + }, + ); + } + StackmapEntryKind::FunctionHeader => { + fsm.wasm_function_header_target_offset = Some(SuspendOffset::Loop(target_offset)); + fsm.loop_offsets.insert( + target_offset, + OffsetInfo { + end_offset, + diff_id, + activate_offset: target_offset, + }, + ); + } + } + } +} + +#[derive(Clone, Debug, Default)] +pub struct StackMap { + pub version: u8, + pub stk_size_records: Vec, + pub constants: Vec, + pub stk_map_records: Vec, +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct StkSizeRecord { + pub function_address: u64, + pub stack_size: u64, + pub record_count: u64, +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct Constant { + pub large_constant: u64, +} + +#[derive(Clone, Debug, Default)] +pub struct StkMapRecord { + pub patchpoint_id: u64, + pub instruction_offset: u32, + pub locations: Vec, + pub live_outs: Vec, +} + +#[derive(Copy, Clone, Debug)] +pub struct Location { + pub ty: LocationType, + pub location_size: u16, + pub dwarf_regnum: u16, + pub offset_or_small_constant: i32, +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct LiveOut { + pub dwarf_regnum: u16, + pub size_in_bytes: u8, +} + +#[derive(Copy, Clone, Debug)] +pub enum LocationType { + Register, + Direct, + Indirect, + Constant, + ConstantIndex, +} + +impl StackMap { + pub fn parse(raw: &[u8]) -> io::Result { + let mut reader = Cursor::new(raw); + let mut map = StackMap::default(); + + let version = reader.read_u8()?; + if version != 3 { + return Err(io::Error::new(io::ErrorKind::Other, "version is not 3")); + } + map.version = version; + if reader.read_u8()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (1)", + )); + } + if reader.read_u16::()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (2)", + )); + } + let num_functions = reader.read_u32::()?; + let num_constants = reader.read_u32::()?; + let num_records = reader.read_u32::()?; + for _ in 0..num_functions { + let mut record = StkSizeRecord::default(); + record.function_address = reader.read_u64::()?; + record.stack_size = reader.read_u64::()?; + record.record_count = reader.read_u64::()?; + map.stk_size_records.push(record); + } + for _ in 0..num_constants { + map.constants.push(Constant { + large_constant: reader.read_u64::()?, + }); + } + for _ in 0..num_records { + let mut record = StkMapRecord::default(); + + record.patchpoint_id = reader.read_u64::()?; + record.instruction_offset = reader.read_u32::()?; + if reader.read_u16::()? 
!= 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (3)", + )); + } + let num_locations = reader.read_u16::()?; + for _ in 0..num_locations { + let ty = reader.read_u8()?; + + let mut location = Location { + ty: match ty { + 1 => LocationType::Register, + 2 => LocationType::Direct, + 3 => LocationType::Indirect, + 4 => LocationType::Constant, + 5 => LocationType::ConstantIndex, + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "unknown location type", + )) + } + }, + location_size: 0, + dwarf_regnum: 0, + offset_or_small_constant: 0, + }; + + if reader.read_u8()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (4)", + )); + } + location.location_size = reader.read_u16::()?; + location.dwarf_regnum = reader.read_u16::()?; + if reader.read_u16::()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (5)", + )); + } + location.offset_or_small_constant = reader.read_i32::()?; + + record.locations.push(location); + } + if reader.position() % 8 != 0 { + if reader.read_u32::()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (6)", + )); + } + } + if reader.read_u16::()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (7)", + )); + } + let num_live_outs = reader.read_u16::()?; + for _ in 0..num_live_outs { + let mut liveout = LiveOut::default(); + + liveout.dwarf_regnum = reader.read_u16::()?; + if reader.read_u8()? != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (8)", + )); + } + liveout.size_in_bytes = reader.read_u8()?; + + record.live_outs.push(liveout); + } + if reader.position() % 8 != 0 { + if reader.read_u32::()? 
!= 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "reserved field is not zero (9)", + )); + } + } + + map.stk_map_records.push(record); + } + Ok(map) + } +} + +fn deref_global(info: &ModuleInfo, idx: usize, deref_into_value: bool) -> Vec { + let mut x: Vec = match GlobalIndex::new(idx).local_or_import(info) { + LocalOrImport::Local(idx) => vec![Ctx::offset_globals() as usize, idx.index() * 8, 0], + LocalOrImport::Import(idx) => { + vec![Ctx::offset_imported_globals() as usize, idx.index() * 8, 0] + } + }; + if deref_into_value { + x.push(0); + } + x +} + +fn deref_table_base(info: &ModuleInfo, idx: usize, deref_into_value: bool) -> Vec { + let mut x: Vec = match TableIndex::new(idx).local_or_import(info) { + LocalOrImport::Local(idx) => vec![Ctx::offset_tables() as usize, idx.index() * 8, 0], + LocalOrImport::Import(idx) => { + vec![Ctx::offset_imported_tables() as usize, idx.index() * 8, 0] + } + }; + if deref_into_value { + x.push(0); + } + x +} + +fn deref_table_bound(info: &ModuleInfo, idx: usize, deref_into_value: bool) -> Vec { + let mut x: Vec = match TableIndex::new(idx).local_or_import(info) { + LocalOrImport::Local(idx) => vec![Ctx::offset_tables() as usize, idx.index() * 8, 8], + LocalOrImport::Import(idx) => { + vec![Ctx::offset_imported_tables() as usize, idx.index() * 8, 8] + } + }; + if deref_into_value { + x.push(0); + } + x +} diff --git a/lib/compiler-llvm/src/translator/state.rs b/lib/compiler-llvm/src/translator/state.rs new file mode 100644 index 0000000000..e3046a09d0 --- /dev/null +++ b/lib/compiler-llvm/src/translator/state.rs @@ -0,0 +1,437 @@ +use inkwell::{ + basic_block::BasicBlock, + values::{BasicValue, BasicValueEnum, PhiValue}, +}; +use smallvec::SmallVec; +use std::ops::{BitAnd, BitOr, BitOrAssign}; +use wasmer_compiler::CompileError; + +#[derive(Debug)] +pub enum ControlFrame<'ctx> { + Block { + next: BasicBlock<'ctx>, + phis: SmallVec<[PhiValue<'ctx>; 1]>, + stack_size_snapshot: usize, + }, + Loop { + body: BasicBlock<'ctx>, + next: BasicBlock<'ctx>, + phis: SmallVec<[PhiValue<'ctx>; 1]>, + loop_body_phis: SmallVec<[PhiValue<'ctx>; 1]>, + stack_size_snapshot: usize, + }, + IfElse { + if_then: BasicBlock<'ctx>, + if_else: BasicBlock<'ctx>, + next: BasicBlock<'ctx>, + then_phis: SmallVec<[PhiValue<'ctx>; 1]>, + else_phis: SmallVec<[PhiValue<'ctx>; 1]>, + next_phis: SmallVec<[PhiValue<'ctx>; 1]>, + stack_size_snapshot: usize, + if_else_state: IfElseState, + }, +} + +#[derive(Debug)] +pub enum IfElseState { + If, + Else, +} + +impl<'ctx> ControlFrame<'ctx> { + pub fn code_after(&self) -> &BasicBlock<'ctx> { + match self { + ControlFrame::Block { ref next, .. } + | ControlFrame::Loop { ref next, .. } + | ControlFrame::IfElse { ref next, .. } => next, + } + } + + pub fn br_dest(&self) -> &BasicBlock<'ctx> { + match self { + ControlFrame::Block { ref next, .. } | ControlFrame::IfElse { ref next, .. } => next, + ControlFrame::Loop { ref body, .. } => body, + } + } + + pub fn phis(&self) -> &[PhiValue<'ctx>] { + match self { + ControlFrame::Block { ref phis, .. } | ControlFrame::Loop { ref phis, .. } => { + phis.as_slice() + } + ControlFrame::IfElse { ref next_phis, .. } => next_phis.as_slice(), + } + } + + /// PHI nodes for stack values in the loop body. + pub fn loop_body_phis(&self) -> &[PhiValue<'ctx>] { + match self { + ControlFrame::Block { .. } | ControlFrame::IfElse { .. } => &[], + ControlFrame::Loop { + ref loop_body_phis, .. + } => loop_body_phis.as_slice(), + } + } + + pub fn is_loop(&self) -> bool { + matches!(self, ControlFrame::Loop { .. 
}) + } +} + +#[derive(Debug, Default, Eq, PartialEq, Copy, Clone, Hash)] +pub struct ExtraInfo { + state: u8, +} +impl ExtraInfo { + // This value is required to be arithmetic 32-bit NaN (or 32x4) by the WAsm + // machine, but which might not be in the LLVM value. The conversion to + // arithmetic NaN is pending. It is required for correctness. + // + // When applied to a 64-bit value, this flag has no meaning and must be + // ignored. It may be set in such cases to allow for common handling of + // 32 and 64-bit operations. + pub const fn pending_f32_nan() -> ExtraInfo { + ExtraInfo { state: 1 } + } + + // This value is required to be arithmetic 64-bit NaN (or 64x2) by the WAsm + // machine, but which might not be in the LLVM value. The conversion to + // arithmetic NaN is pending. It is required for correctness. + // + // When applied to a 32-bit value, this flag has no meaning and must be + // ignored. It may be set in such cases to allow for common handling of + // 32 and 64-bit operations. + pub const fn pending_f64_nan() -> ExtraInfo { + ExtraInfo { state: 2 } + } + + // This value either does not contain a 32-bit NaN, or it contains an + // arithmetic NaN. In SIMD, applies to all 4 lanes. + pub const fn arithmetic_f32() -> ExtraInfo { + ExtraInfo { state: 4 } + } + + // This value either does not contain a 64-bit NaN, or it contains an + // arithmetic NaN. In SIMD, applies to both lanes. + pub const fn arithmetic_f64() -> ExtraInfo { + ExtraInfo { state: 8 } + } + + pub const fn has_pending_f32_nan(&self) -> bool { + self.state & ExtraInfo::pending_f32_nan().state != 0 + } + pub const fn has_pending_f64_nan(&self) -> bool { + self.state & ExtraInfo::pending_f64_nan().state != 0 + } + pub const fn is_arithmetic_f32(&self) -> bool { + self.state & ExtraInfo::arithmetic_f32().state != 0 + } + pub const fn is_arithmetic_f64(&self) -> bool { + self.state & ExtraInfo::arithmetic_f64().state != 0 + } + + pub const fn strip_pending(&self) -> ExtraInfo { + ExtraInfo { + state: self.state + & !(ExtraInfo::pending_f32_nan().state | ExtraInfo::pending_f64_nan().state), + } + } +} + +// Union two ExtraInfos. +impl BitOr for ExtraInfo { + type Output = Self; + + fn bitor(self, other: Self) -> Self { + debug_assert!(!(self.has_pending_f32_nan() && other.has_pending_f64_nan())); + debug_assert!(!(self.has_pending_f64_nan() && other.has_pending_f32_nan())); + ExtraInfo { + state: if self.is_arithmetic_f32() || other.is_arithmetic_f32() { + ExtraInfo::arithmetic_f32().state + } else if self.has_pending_f32_nan() || other.has_pending_f32_nan() { + ExtraInfo::pending_f32_nan().state + } else { + 0 + } + if self.is_arithmetic_f64() || other.is_arithmetic_f64() { + ExtraInfo::arithmetic_f64().state + } else if self.has_pending_f64_nan() || other.has_pending_f64_nan() { + ExtraInfo::pending_f64_nan().state + } else { + 0 + }, + } + } +} +impl BitOrAssign for ExtraInfo { + fn bitor_assign(&mut self, other: Self) { + *self = *self | other; + } +} + +// Intersection for ExtraInfo. +impl BitAnd for ExtraInfo { + type Output = Self; + fn bitand(self, other: Self) -> Self { + // Pending canonicalizations are not safe to discard, or even reorder. 
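// Editor's sketch (hedged): the flag algebra ExtraInfo is meant to satisfy,
// exercised against the definitions above. Union keeps the strongest known
// fact per float width ("arithmetic" absorbs "pending NaN"); intersection
// keeps "arithmetic" only when both sides guarantee it.
fn extra_info_algebra_examples() {
    let arith = ExtraInfo::arithmetic_f32();
    let pending = ExtraInfo::pending_f32_nan();
    assert!((arith | pending).is_arithmetic_f32()); // arithmetic wins on union
    assert!(!(arith | pending).has_pending_f32_nan());
    assert!(!(arith & ExtraInfo::default()).is_arithmetic_f32()); // needs both sides
}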
+ debug_assert!( + self.has_pending_f32_nan() == other.has_pending_f32_nan() + || self.is_arithmetic_f32() + || other.is_arithmetic_f32() + ); + debug_assert!( + self.has_pending_f64_nan() == other.has_pending_f64_nan() + || self.is_arithmetic_f64() + || other.is_arithmetic_f64() + ); + let info = match ( + self.is_arithmetic_f32() && other.is_arithmetic_f32(), + self.is_arithmetic_f64() && other.is_arithmetic_f64(), + ) { + (false, false) => Default::default(), + (true, false) => ExtraInfo::arithmetic_f32(), + (false, true) => ExtraInfo::arithmetic_f64(), + (true, true) => ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), + }; + match (self.has_pending_f32_nan(), self.has_pending_f64_nan()) { + (false, false) => info, + (true, false) => info | ExtraInfo::pending_f32_nan(), + (false, true) => info | ExtraInfo::pending_f64_nan(), + (true, true) => unreachable!("Can't form ExtraInfo with two pending canonicalizations"), + } + } +} + +#[derive(Debug)] +pub struct State<'ctx> { + pub stack: Vec<(BasicValueEnum<'ctx>, ExtraInfo)>, + control_stack: Vec>, + + pub reachable: bool, +} + +impl<'ctx> State<'ctx> { + pub fn new() -> Self { + Self { + stack: vec![], + control_stack: vec![], + reachable: true, + } + } + + pub fn has_control_frames(&self) -> bool { + !self.control_stack.is_empty() + } + + pub fn reset_stack(&mut self, frame: &ControlFrame<'ctx>) { + let stack_size_snapshot = match frame { + ControlFrame::Block { + stack_size_snapshot, + .. + } + | ControlFrame::Loop { + stack_size_snapshot, + .. + } + | ControlFrame::IfElse { + stack_size_snapshot, + .. + } => *stack_size_snapshot, + }; + self.stack.truncate(stack_size_snapshot); + } + + pub fn outermost_frame(&self) -> Result<&ControlFrame<'ctx>, CompileError> { + self.control_stack.get(0).ok_or_else(|| { + CompileError::Codegen("outermost_frame: invalid control stack depth".to_string()) + }) + } + + pub fn frame_at_depth(&self, depth: u32) -> Result<&ControlFrame<'ctx>, CompileError> { + let index = self + .control_stack + .len() + .checked_sub(1 + (depth as usize)) + .ok_or_else(|| { + CompileError::Codegen("frame_at_depth: invalid control stack depth".to_string()) + })?; + Ok(&self.control_stack[index]) + } + + pub fn frame_at_depth_mut( + &mut self, + depth: u32, + ) -> Result<&mut ControlFrame<'ctx>, CompileError> { + let index = self + .control_stack + .len() + .checked_sub(1 + (depth as usize)) + .ok_or_else(|| { + CompileError::Codegen("frame_at_depth_mut: invalid control stack depth".to_string()) + })?; + Ok(&mut self.control_stack[index]) + } + + pub fn pop_frame(&mut self) -> Result, CompileError> { + self.control_stack.pop().ok_or_else(|| { + CompileError::Codegen("pop_frame: cannot pop from control stack".to_string()) + }) + } + + pub fn push1>(&mut self, value: T) { + self.push1_extra(value, Default::default()); + } + + pub fn push1_extra>(&mut self, value: T, info: ExtraInfo) { + self.stack.push((value.as_basic_value_enum(), info)); + } + + pub fn pop1(&mut self) -> Result, CompileError> { + Ok(self.pop1_extra()?.0) + } + + pub fn pop1_extra(&mut self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> { + self.stack + .pop() + .ok_or_else(|| CompileError::Codegen("pop1_extra: invalid value stack".to_string())) + } + + pub fn pop2(&mut self) -> Result<(BasicValueEnum<'ctx>, BasicValueEnum<'ctx>), CompileError> { + let v2 = self.pop1()?; + let v1 = self.pop1()?; + Ok((v1, v2)) + } + + pub fn pop2_extra( + &mut self, + ) -> Result< + ( + (BasicValueEnum<'ctx>, ExtraInfo), + (BasicValueEnum<'ctx>, 
ExtraInfo), + ), + CompileError, + > { + let v2 = self.pop1_extra()?; + let v1 = self.pop1_extra()?; + Ok((v1, v2)) + } + + pub fn pop3( + &mut self, + ) -> Result< + ( + BasicValueEnum<'ctx>, + BasicValueEnum<'ctx>, + BasicValueEnum<'ctx>, + ), + CompileError, + > { + let v3 = self.pop1()?; + let v2 = self.pop1()?; + let v1 = self.pop1()?; + Ok((v1, v2, v3)) + } + + pub fn pop3_extra( + &mut self, + ) -> Result< + ( + (BasicValueEnum<'ctx>, ExtraInfo), + (BasicValueEnum<'ctx>, ExtraInfo), + (BasicValueEnum<'ctx>, ExtraInfo), + ), + CompileError, + > { + let v3 = self.pop1_extra()?; + let v2 = self.pop1_extra()?; + let v1 = self.pop1_extra()?; + Ok((v1, v2, v3)) + } + + pub fn peek1_extra(&self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> { + let index = + self.stack.len().checked_sub(1).ok_or_else(|| { + CompileError::Codegen("peek1_extra: invalid value stack".to_string()) + })?; + Ok(self.stack[index]) + } + + pub fn peekn(&self, n: usize) -> Result>, CompileError> { + Ok(self.peekn_extra(n)?.iter().map(|x| x.0).collect()) + } + + pub fn peekn_extra( + &self, + n: usize, + ) -> Result<&[(BasicValueEnum<'ctx>, ExtraInfo)], CompileError> { + let index = + self.stack.len().checked_sub(n).ok_or_else(|| { + CompileError::Codegen("peekn_extra: invalid value stack".to_string()) + })?; + Ok(&self.stack[index..]) + } + + pub fn popn_save_extra( + &mut self, + n: usize, + ) -> Result, ExtraInfo)>, CompileError> { + let v = self.peekn_extra(n)?.to_vec(); + self.popn(n)?; + Ok(v) + } + + pub fn popn(&mut self, n: usize) -> Result<(), CompileError> { + let index = self + .stack + .len() + .checked_sub(n) + .ok_or_else(|| CompileError::Codegen("popn: invalid value stack".to_string()))?; + + self.stack.truncate(index); + Ok(()) + } + + pub fn push_block(&mut self, next: BasicBlock<'ctx>, phis: SmallVec<[PhiValue<'ctx>; 1]>) { + self.control_stack.push(ControlFrame::Block { + next, + phis, + stack_size_snapshot: self.stack.len(), + }); + } + + pub fn push_loop( + &mut self, + body: BasicBlock<'ctx>, + next: BasicBlock<'ctx>, + loop_body_phis: SmallVec<[PhiValue<'ctx>; 1]>, + phis: SmallVec<[PhiValue<'ctx>; 1]>, + ) { + self.control_stack.push(ControlFrame::Loop { + body, + next, + loop_body_phis, + phis, + stack_size_snapshot: self.stack.len(), + }); + } + + pub fn push_if( + &mut self, + if_then: BasicBlock<'ctx>, + if_else: BasicBlock<'ctx>, + next: BasicBlock<'ctx>, + then_phis: SmallVec<[PhiValue<'ctx>; 1]>, + else_phis: SmallVec<[PhiValue<'ctx>; 1]>, + next_phis: SmallVec<[PhiValue<'ctx>; 1]>, + ) { + self.control_stack.push(ControlFrame::IfElse { + if_then, + if_else, + next, + then_phis, + else_phis, + next_phis, + stack_size_snapshot: self.stack.len(), + if_else_state: IfElseState::If, + }); + } +} diff --git a/lib/compiler-singlepass/Cargo.toml b/lib/compiler-singlepass/Cargo.toml index 383996fd99..9e7d455eae 100644 --- a/lib/compiler-singlepass/Cargo.toml +++ b/lib/compiler-singlepass/Cargo.toml @@ -15,22 +15,18 @@ edition = "2018" name = "wasmer_compiler_singlepass" [dependencies] -finite-wasm = "0.3.0" wasmer-compiler = { path = "../compiler", package = "wasmer-compiler-near", version = "=2.4.0", features = ["translator"], default-features = false } wasmer-vm = { path = "../vm", package = "wasmer-vm-near", version = "=2.4.0" } wasmer-types = { path = "../types", package = "wasmer-types-near", version = "=2.4.0", default-features = false, features = ["std"] } -byteorder = "1.3" +rayon = { version = "1.5", optional = true } +hashbrown = { version = "0.11", optional = true } 
+more-asserts = "0.2" dynasm = "1.0" dynasmrt = "1.0" -enumset = "1.0" -hashbrown = { version = "0.11", optional = true } lazy_static = "1.4" -memoffset = "0.6" -more-asserts = "0.2" -rayon = { version = "1.5", optional = true } +byteorder = "1.3" smallvec = "1.6" -strum = { version = "0.24", features = ["derive"] } -tracing = "0.1" +memoffset = "0.6" [dev-dependencies] target-lexicon = { version = "0.12.2", default-features = false } diff --git a/lib/compiler-singlepass/README.md b/lib/compiler-singlepass/README.md index 0cef04e0dd..4265359f37 100644 --- a/lib/compiler-singlepass/README.md +++ b/lib/compiler-singlepass/README.md @@ -20,7 +20,9 @@ here][example].* Singlepass is designed to emit compiled code at linear time, as such is not prone to JIT bombs and also offers great compilation -performance, however with a bit slower runtime speed. +performance orders of magnitude faster than +[`wasmer-compiler-cranelift`] and [`wasmer-compiler-llvm`], however +with a bit slower runtime speed. The fact that singlepass is not prone to JIT bombs and offers a very predictable compilation speed makes it ideal for **blockchains** and other @@ -28,3 +30,5 @@ systems where fast and consistent compilation times are very critical. [example]: https://github.com/wasmerio/wasmer/blob/master/examples/compiler_singlepass.rs +[`wasmer-compiler-cranelift`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-cranelift +[`wasmer-compiler-llvm`]: https://github.com/wasmerio/wasmer/tree/master/lib/compiler-llvm diff --git a/lib/compiler-singlepass/src/codegen_x64.rs b/lib/compiler-singlepass/src/codegen_x64.rs index c115f069b9..76a66a2e6c 100644 --- a/lib/compiler-singlepass/src/codegen_x64.rs +++ b/lib/compiler-singlepass/src/codegen_x64.rs @@ -1,19 +1,19 @@ use crate::address_map::get_function_address_map; -use crate::config::IntrinsicKind; +use crate::config::{Intrinsic, IntrinsicKind}; use crate::{config::Singlepass, emitter_x64::*, machine::Machine, x64_decl::*}; -use dynasmrt::{x64::X64Relocation, DynamicLabel, VecAssembler}; -use finite_wasm::gas::InstrumentationKind; +use dynasmrt::{x64::X64Relocation, AssemblyOffset, DynamicLabel, DynasmApi, VecAssembler}; use memoffset::offset_of; use smallvec::{smallvec, SmallVec}; -use std::convert::TryFrom; +use std::cmp::max; use std::iter; -use std::slice; -use wasmer_compiler::wasmparser::{BlockType as WpBlockType, MemArg, Operator, ValType as WpType}; +use wasmer_compiler::wasmparser::{ + MemoryImmediate, Operator, Type as WpType, TypeOrFuncType as WpTypeOrFuncType, +}; use wasmer_compiler::{ CallingConvention, CompiledFunction, CompiledFunctionFrameInfo, CustomSection, CustomSectionProtection, FunctionBody, FunctionBodyData, InstructionAddressMap, ModuleTranslationState, Relocation, RelocationKind, RelocationTarget, SectionBody, - SectionIndex, SourceLoc, Target, + SectionIndex, SourceLoc, }; use wasmer_types::{ entity::{EntityRef, PrimaryMap, SecondaryMap}, @@ -39,9 +39,6 @@ pub(crate) struct FuncGen<'a> { /// ModuleInfo compilation config. config: &'a Singlepass, - /// Target to which we compile - target: &'a Target, - /// Offsets of vmctx fields. vmoffsets: &'a VMOffsets, @@ -60,12 +57,21 @@ pub(crate) struct FuncGen<'a> { /// support automatic relative relocations for `Vec`. assembler: Assembler, - /// Types of the local variables, including arguments. - local_types: wasmer_types::partial_sum_map::PartialSumMap, + /// Memory locations of local variables. + locals: Vec, + + /// Types of local variables, including arguments. 
+ local_types: Vec, /// Value stack. value_stack: Vec, + /// Max stack depth. + max_stack_depth: usize, + + /// Location to patch when we know the max stack depth. + stack_check_offset: AssemblyOffset, + /// Metadata about floating point values on the stack. fp_stack: Vec, @@ -94,15 +100,6 @@ pub(crate) struct FuncGen<'a> { /// Calling convention to use. calling_convention: CallingConvention, - - /// Cost for initializing the stack of the function - stack_init_gas_cost: u64, - - /// Iterator over the gas instrumentation points - gas_iter: iter::Peekable, slice::Iter<'a, u64>>>, - - /// Maximum size of the stack for this function - stack_size: u32, } struct SpecialLabelSet { @@ -234,14 +231,7 @@ impl WpTypeExt for WpType { #[derive(Debug)] pub(crate) struct ControlFrame { - /// The label to which `br` opcodes should jump - /// - /// This is: - /// * for functions (ie. the control_stack[0]), the start of the epilogue - /// * for `block` or `if`/`else` blocks, the end of the block (after stack cleanup) - /// * for `loop` blocks, the beginning of the loop block - pub(crate) br_label: DynamicLabel, - + pub(crate) label: DynamicLabel, pub(crate) loop_like: bool, pub(crate) if_else: IfElseState, pub(crate) returns: SmallVec<[WpType; 1]>, @@ -280,7 +270,15 @@ impl<'a> FuncGen<'a> { loc } + fn update_max_stack_depth(&mut self) { + self.max_stack_depth = max( + self.max_stack_depth, + self.value_stack.len() + self.fp_stack.len(), + ); + } + fn pop_value_released(&mut self) -> Location { + self.update_max_stack_depth(); let loc = self .value_stack .pop() @@ -299,8 +297,14 @@ impl<'a> FuncGen<'a> { I2O1 { loc_a, loc_b, ret } } - fn emit_call(&mut self, function: FunctionIndex) -> Result<(), CodegenError> { - let sig_index = *self.module.functions.get(function).unwrap(); + fn emit_call(&mut self, function_index: u32) -> Result<(), CodegenError> { + let function_index = function_index as usize; + + let sig_index = *self + .module + .functions + .get(FunctionIndex::new(function_index)) + .unwrap(); let sig = self.module.signatures.get(sig_index).unwrap(); let param_types: SmallVec<[WpType; 8]> = sig.params().iter().cloned().map(type_to_wp_type).collect(); @@ -317,6 +321,7 @@ impl<'a> FuncGen<'a> { // // Canonicalization state will be lost across function calls, so early canonicalization // is necessary here. + self.update_max_stack_depth(); while let Some(fp) = self.fp_stack.last() { if fp.depth >= self.value_stack.len() { let index = fp.depth - self.value_stack.len(); @@ -333,147 +338,165 @@ impl<'a> FuncGen<'a> { } } - if self.try_intrinsic(function, ¶ms) { - // This was genereated as an intrinsic, we're done. - return Ok(()); - } - - let reloc_at = self.assembler.get_offset().0 + self.assembler.arch_mov64_imm_offset(); - // Imported functions are called through trampolines placed as custom sections. - let reloc_target = match self.module.import_counts.local_function_index(function) { - Ok(local) => RelocationTarget::LocalFunc(local), - Err(imp) => RelocationTarget::CustomSection(SectionIndex::from_u32(imp.as_u32())), - }; - self.relocations.push(Relocation { - kind: RelocationKind::Abs8, - reloc_target, - offset: reloc_at as u32, - addend: 0, - }); + if let Some(intrinsic) = self.check_intrinsic(function_index, ¶ms) { + self.emit_intrinsic(intrinsic, ¶ms)? + } else { + let reloc_at = self.assembler.get_offset().0 + self.assembler.arch_mov64_imm_offset(); + // Imported functions are called through trampolines placed as custom sections. 
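// Editor's sketch (hedged) of the invariant behind update_max_stack_depth:
// only the high-water mark of value_stack.len() + fp_stack.len() is needed
// for the stack check, so it suffices to sample the combined depth at the
// points where it is about to shrink (pops, calls, block ends) instead of on
// every push.
struct HighWater { depth: usize, max: usize }
impl HighWater {
    fn push(&mut self) { self.depth += 1; }
    fn pop(&mut self) {
        self.max = self.max.max(self.depth); // sample before shrinking
        self.depth -= 1;
    }
}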
+ let imports = self.module.import_counts.functions as usize; + let reloc_target = if function_index < imports { + RelocationTarget::CustomSection(SectionIndex::new(function_index)) + } else { + RelocationTarget::LocalFunc(LocalFunctionIndex::new(function_index - imports)) + }; + self.relocations.push(Relocation { + kind: RelocationKind::Abs8, + reloc_target, + offset: reloc_at as u32, + addend: 0, + }); - // RAX is preserved on entry to `emit_call_sysv` callback. - // The Imm64 value is relocated by the JIT linker. - self.assembler.emit_mov( - Size::S64, - Location::Imm64(std::u64::MAX), - Location::GPR(GPR::RAX), - ); + // RAX is preserved on entry to `emit_call_sysv` callback. + // The Imm64 value is relocated by the JIT linker. + self.assembler.emit_mov( + Size::S64, + Location::Imm64(std::u64::MAX), + Location::GPR(GPR::RAX), + ); - self.emit_call_native( - |this| { - this.assembler.emit_call_location(Location::GPR(GPR::RAX)); - }, - params.iter().copied(), - )?; + self.emit_call_native( + |this| { + this.assembler.emit_call_location(Location::GPR(GPR::RAX)); + }, + params.iter().copied(), + )?; - self.machine - .release_locations_only_stack(&mut self.assembler, ¶ms); + self.machine + .release_locations_only_stack(&mut self.assembler, ¶ms); - if !return_types.is_empty() { - let ret = - self.machine - .acquire_locations(&mut self.assembler, &[(return_types[0])], false)[0]; - self.value_stack.push(ret); - if return_types[0].is_float() { - self.assembler - .emit_mov(Size::S64, Location::XMM(XMM::XMM0), ret); - self.fp_stack - .push(FloatValue::new(self.value_stack.len() - 1)); - } else { - self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret); + if !return_types.is_empty() { + let ret = self.machine.acquire_locations( + &mut self.assembler, + &[(return_types[0])], + false, + )[0]; + self.value_stack.push(ret); + if return_types[0].is_float() { + self.assembler + .emit_mov(Size::S64, Location::XMM(XMM::XMM0), ret); + self.fp_stack + .push(FloatValue::new(self.value_stack.len() - 1)); + } else { + self.assembler + .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret); + } } } + Ok(()) } - /// Try emitting an intrinsic for a function call of function at index. - fn try_intrinsic(&mut self, function: FunctionIndex, params: &SmallVec<[Location; 8]>) -> bool { - let signature_index = self.module.functions[function]; + fn check_intrinsic( + &mut self, + index: usize, + params: &SmallVec<[Location; 8]>, + ) -> Option { + let function_index = FunctionIndex::new(index); + let signature_index = self.module.functions[function_index]; let signature = &self.module.signatures[signature_index]; - let import_name = self.module_translation_state.import_map.get(&function); - let intrinsic = import_name.and_then(|import_name| { - self.config.intrinsics.iter().find(|intrinsic| { - intrinsic.name == *import_name - && intrinsic.signature == *signature - && intrinsic.is_params_ok(params) - }) - }); - match intrinsic.map(|i| &i.kind) { - Some(IntrinsicKind::Gas) => self.emit_gas(params[0]), - None => return false, - } - return true; - } - - fn emit_gas_const(&mut self, cost: u64) { - if let Ok(cost) = u32::try_from(cost) { - self.emit_gas(Location::Imm32(cost)); - } else { - let cost_reg = self.machine.acquire_temp_gpr().unwrap(); - self.assembler - .emit_mov(Size::S64, Location::Imm64(cost), Location::GPR(cost_reg)); - self.emit_gas(Location::GPR(cost_reg)); - self.machine.release_temp_gpr(cost_reg); + // Returns None if not imported. 
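// Editor's restatement (hedged; the exact config::Intrinsic parameter types
// are approximated here) of the matching rule implemented below: a call
// becomes an intrinsic only when the callee is an import whose configured
// name and full wasm signature both match, and whose argument locations the
// intrinsic accepts.
fn intrinsic_matches(
    i: &Intrinsic,
    import_name: &str,
    sig: &FunctionType,
    params: &[Location],
) -> bool {
    i.name == import_name && i.signature == *sig && i.is_params_ok(params)
}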
+ let import_name = self + .module_translation_state + .import_map + .get(&function_index)?; + // TODO: can keep intrinsics in above map, but not sure if we'll have + // significant amount of them to make it important. + for intrinsic in &self.config.intrinsics { + if intrinsic.name == *import_name + && intrinsic.signature == *signature + && intrinsic.is_params_ok(params) + { + return Some(intrinsic.clone()); + } } + None } - /// Emit a gas charge operation. The gas amount is stored in `cost_location`, which must be either an imm32 or a GPR - // (this is because emit_add can only take up to an imm32) - fn emit_gas(&mut self, cost_location: Location) { - if cost_location == Location::Imm32(0) { - return; // skip, which we must do because emit_add optimizes out the add 0 which leaves CF clobbered otherwise + fn emit_intrinsic( + &mut self, + intrinsic: Intrinsic, + params: &SmallVec<[Location; 8]>, + ) -> Result<(), CodegenError> { + match intrinsic.kind { + IntrinsicKind::Gas => { + let counter_offset = offset_of!(FastGasCounter, burnt_gas) as i32; + let gas_limit_offset = offset_of!(FastGasCounter, gas_limit) as i32; + let opcode_cost_offset = offset_of!(FastGasCounter, opcode_cost) as i32; + // Recheck offsets, to make sure offsets will never change. + assert_eq!(counter_offset, 0); + assert_eq!(gas_limit_offset, 8); + assert_eq!(opcode_cost_offset, 16); + assert_eq!(params.len(), 1); + let count_location = params[0]; + let base_reg = self.machine.acquire_temp_gpr().unwrap(); + // Load gas counter base. + self.assembler.emit_mov( + Size::S64, + Location::Memory( + Machine::get_vmctx_reg(), + self.vmoffsets.vmctx_gas_limiter_pointer() as i32, + ), + Location::GPR(base_reg), + ); + let current_burnt_reg = self.machine.acquire_temp_gpr().unwrap(); + // Read current gas counter. + self.assembler.emit_mov( + Size::S64, + Location::Memory(base_reg, counter_offset), + Location::GPR(current_burnt_reg), + ); + // Read opcode cost. + let count_reg = self.machine.acquire_temp_gpr().unwrap(); + self.assembler.emit_mov( + Size::S64, + Location::Memory(base_reg, opcode_cost_offset), + Location::GPR(count_reg), + ); + // Multiply instruction count by opcode cost. + match count_location { + Location::Imm32(imm) => self.assembler.emit_imul_imm32_gpr64(imm, count_reg), + _ => assert!(false), + } + // Compute new cost. + self.assembler.emit_add( + Size::S64, + Location::GPR(count_reg), + Location::GPR(current_burnt_reg), + ); + self.assembler + .emit_jmp(Condition::Overflow, self.special_labels.integer_overflow); + // Compare with the limit. + self.assembler.emit_cmp( + Size::S64, + Location::GPR(current_burnt_reg), + Location::Memory(base_reg, gas_limit_offset), + ); + // Write new gas counter unconditionally, so that runtime can sort out limits case. + self.assembler.emit_mov( + Size::S64, + Location::GPR(current_burnt_reg), + Location::Memory(base_reg, counter_offset), + ); + self.assembler.emit_jmp( + Condition::BelowEqual, + self.special_labels.gas_limit_exceeded, + ); + self.machine.release_temp_gpr(base_reg); + self.machine.release_temp_gpr(current_burnt_reg); + self.machine.release_temp_gpr(count_reg); + } } - assert!( - matches!(cost_location, Location::Imm32(_) | Location::GPR(_)), - "emit_gas can take only an imm32 or a gpr argument" - ); - - let counter_offset = offset_of!(FastGasCounter, burnt_gas) as i32; - let gas_limit_offset = offset_of!(FastGasCounter, gas_limit) as i32; - // Recheck offsets, to make sure offsets will never change. 
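// Plain-Rust model (editor's sketch, hedged) of what the emitted gas sequence
// above computes. Field order mirrors the asserted offsets (burnt_gas at 0,
// gas_limit at 8, opcode_cost at 16); the multiply mirrors
// emit_imul_imm32_gpr64, the checked add mirrors add + Condition::Overflow,
// and the counter is written back unconditionally so the runtime can inspect
// the overrun before handling the trap.
#[repr(C)]
struct FastGasCounterModel { burnt_gas: u64, gas_limit: u64, opcode_cost: u64 }
enum GasTrap { IntegerOverflow, GasLimitExceeded }
fn charge_gas(c: &mut FastGasCounterModel, instr_count: u32) -> Result<(), GasTrap> {
    let delta = c.opcode_cost.wrapping_mul(instr_count as u64); // imul imm32
    let new_burnt = c
        .burnt_gas
        .checked_add(delta)
        .ok_or(GasTrap::IntegerOverflow)?;
    c.burnt_gas = new_burnt; // stored before the limit branch
    if c.gas_limit <= new_burnt { // Condition::BelowEqual
        return Err(GasTrap::GasLimitExceeded);
    }
    Ok(())
}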
- assert_eq!(counter_offset, 0); - assert_eq!(gas_limit_offset, 8); - let base_reg = self.machine.acquire_temp_gpr().unwrap(); - // Load gas counter base. - self.assembler.emit_mov( - Size::S64, - Location::Memory( - Machine::get_vmctx_reg(), - self.vmoffsets.vmctx_gas_limiter_pointer() as i32, - ), - Location::GPR(base_reg), - ); - let current_burnt_reg = self.machine.acquire_temp_gpr().unwrap(); - // Read current gas counter. - self.assembler.emit_mov( - Size::S64, - Location::Memory(base_reg, counter_offset), - Location::GPR(current_burnt_reg), - ); - // Compute new cost. - self.assembler - .emit_add(Size::S64, cost_location, Location::GPR(current_burnt_reg)); - self.assembler - .emit_jmp(Condition::Carry, self.special_labels.integer_overflow); - // Compare with the limit. - self.assembler.emit_cmp( - Size::S64, - Location::GPR(current_burnt_reg), - Location::Memory(base_reg, gas_limit_offset), - ); - // Write new gas counter unconditionally, so that runtime can sort out limits case. - self.assembler.emit_mov( - Size::S64, - Location::GPR(current_burnt_reg), - Location::Memory(base_reg, counter_offset), - ); - self.assembler.emit_jmp( - Condition::BelowEqual, - self.special_labels.gas_limit_exceeded, - ); - self.machine.release_temp_gpr(base_reg); - self.machine.release_temp_gpr(current_burnt_reg); + Ok(()) } fn emit_trap(&mut self, code: TrapCode) { @@ -1353,7 +1376,7 @@ impl<'a> FuncGen<'a> { fn emit_memory_op Result<(), CodegenError>>( &mut self, addr: Location, - memarg: &MemArg, + memarg: &MemoryImmediate, check_alignment: bool, value_size: usize, cb: F, @@ -1421,7 +1444,7 @@ impl<'a> FuncGen<'a> { if memarg.offset != 0 { self.assembler.emit_add( Size::S32, - Location::Imm32(u32::try_from(memarg.offset).unwrap()), // we don’t support 64-bit memory, and this module was validated + Location::Imm32(memarg.offset), Location::GPR(tmp_addr), ); @@ -1477,7 +1500,7 @@ impl<'a> FuncGen<'a> { loc: Location, target: Location, ret: Location, - memarg: &MemArg, + memarg: &MemoryImmediate, value_size: usize, memory_sz: Size, stack_sz: Size, @@ -1800,52 +1823,75 @@ impl<'a> FuncGen<'a> { self.assembler.emit_label(end); } - pub(crate) fn emit_head(&mut self) -> Result<(), CodegenError> { + fn emit_stack_check(&mut self, enter: bool, depth: usize) { + if enter { + // Here we must use value we do not yet know, so we write 0x7fff_ffff and patch it later. + self.assembler.emit_sub( + Size::S32, + Location::Imm32(0x7fff_ffff), + Location::Memory( + Machine::get_vmctx_reg(), + self.vmoffsets.vmctx_stack_limit_begin() as i32, + ), + ); + // TODO: make it cleaner, now we assume instruction with 32-bit immediate at the end. + // Recheck offsets, if change above instruction to anything else. + self.stack_check_offset = AssemblyOffset(self.assembler.offset().0 - 4); + self.assembler + .emit_jmp(Condition::Signed, self.special_labels.stack_overflow); + } else { + { + // Patch earlier stack checker with now known max stack depth. + assert!(self.stack_check_offset.0 > 0); + let mut alter = self.assembler.alter(); + alter.goto(self.stack_check_offset); + // TODO: check that the value before was 0x7fff_ffff + alter.push_u32(depth as u32); + } + self.assembler.emit_add( + Size::S32, + Location::Imm32(depth as u32), + Location::Memory( + Machine::get_vmctx_reg(), + self.vmoffsets.vmctx_stack_limit_begin() as i32, + ), + ); + } + } + + fn emit_function_stack_check(&mut self, enter: bool) { + // `local_types` include parameters as well. 
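// Plain-Rust model (editor's sketch, hedged) of the enter/leave protocol that
// emit_stack_check implements against the vmctx stack-limit word: subtract the
// function's depth on entry, trap if the result went negative
// (Condition::Signed), and add the same depth back on return. The depth
// (local_types.len() + max_stack_depth plus 4 slack slots for the saved frame
// base, return address and callee-saved spills) is unknown when the prologue
// is emitted, hence the 0x7fff_ffff placeholder that is patched later.
struct StackBudget { remaining: i32 }
enum StackTrap { Overflow }
impl StackBudget {
    fn enter(&mut self, depth: u32) -> Result<(), StackTrap> {
        self.remaining = self.remaining.wrapping_sub(depth as i32); // emit_sub
        if self.remaining < 0 {
            return Err(StackTrap::Overflow); // jmp Condition::Signed
        }
        Ok(())
    }
    fn leave(&mut self, depth: u32) {
        self.remaining = self.remaining.wrapping_add(depth as i32); // emit_add
    }
}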
+ let depth = self.local_types.len() + + self.max_stack_depth + // we add 4 to ensure that deep recursion is prohibited even for local and argument free + // functions, as they still use stack space for the saved frame base and return address, + // along with spill area for callee-saved registers. + + 4; + self.emit_stack_check(enter, depth); + } + + fn emit_head(&mut self) -> Result<(), CodegenError> { // TODO: Patchpoint is not emitted for now, and ARM trampoline is not prepended. // Normal x86 entry prologue. self.assembler.emit_push(Size::S64, Location::GPR(GPR::RBP)); self.assembler .emit_mov(Size::S64, Location::GPR(GPR::RSP), Location::GPR(GPR::RBP)); - - // Setup the registers (incl. defining the vmctx register) - let local_count = self.local_count(); - self.machine.setup_registers( + // Initialize locals. + self.locals = self.machine.init_locals( &mut self.assembler, - local_count, - self.signature.params().len() as u32, + self.local_types.len(), + self.signature.params().len(), self.calling_convention, ); - // Verify stack height - self.assembler.emit_sub( - Size::S32, - Location::Imm32(self.stack_size), - Location::Memory( - Machine::get_vmctx_reg(), - self.vmoffsets.vmctx_stack_limit_begin() as i32, - ), - ); - self.assembler - .emit_jmp(Condition::Carry, self.special_labels.stack_overflow); - - // Charge for the stack initialization - self.emit_gas_const(self.stack_init_gas_cost); - - // Initialize the locals - let local_count = self.local_count(); - self.machine.init_locals( - &mut self.assembler, - local_count, - self.signature.params().len() as u32, - self.calling_convention, - ); + self.emit_function_stack_check(true); self.assembler .emit_sub(Size::S64, Location::Imm32(32), Location::GPR(GPR::RSP)); // simulate "red zone" if not supported by the platform self.control_stack.push(ControlFrame { - br_label: self.assembler.get_label(), + label: self.assembler.get_label(), loop_like: false, if_else: IfElseState::None, returns: self @@ -1871,26 +1917,27 @@ impl<'a> FuncGen<'a> { }); } - #[tracing::instrument(skip_all)] pub(crate) fn new( module: &'a ModuleInfo, module_translation_state: &'a ModuleTranslationState, config: &'a Singlepass, - target: &'a Target, vmoffsets: &'a VMOffsets, _table_styles: &'a PrimaryMap, local_func_index: LocalFunctionIndex, + local_types_excluding_arguments: &[WpType], calling_convention: CallingConvention, - stack_init_gas_cost: u64, - gas_offsets: &'a [usize], - gas_costs: &'a [u64], - _gas_kinds: &'a [InstrumentationKind], - stack_size: u64, ) -> Result, CodegenError> { let func_index = module.func_index(local_func_index); let sig_index = module.functions[func_index]; let signature = module.signatures[sig_index].clone(); + let mut local_types: Vec<_> = signature + .params() + .iter() + .map(|&x| type_to_wp_type(x)) + .collect(); + local_types.extend_from_slice(&local_types_excluding_arguments); + let mut assembler = Assembler::new(0); let special_labels = SpecialLabelSet { integer_division_by_zero: assembler.get_label(), @@ -1908,11 +1955,15 @@ impl<'a> FuncGen<'a> { module, module_translation_state, config, - target, vmoffsets, - local_types: wasmer_types::partial_sum_map::PartialSumMap::new(), + // table_styles, + signature, assembler, + locals: vec![], // initialization deferred to emit_head + local_types, value_stack: vec![], + max_stack_depth: 0, + stack_check_offset: AssemblyOffset(0), fp_stack: vec![], control_stack: vec![], machine: Machine::new(), @@ -1922,16 +1973,8 @@ impl<'a> FuncGen<'a> { src_loc: 0, instructions_address_map: vec![], 
calling_convention, - signature, - stack_init_gas_cost, - gas_iter: gas_offsets.iter().zip(gas_costs.iter()).peekable(), - stack_size: u32::try_from(stack_size).map_err(|_| CodegenError { - message: "one function has a stack more than u32::MAX deep".to_string(), - })?, }; - for param in module.signatures[sig_index].params() { - fg.feed_local(1, type_to_wp_type(*param)); - } + fg.emit_head()?; Ok(fg) } @@ -1939,57 +1982,14 @@ impl<'a> FuncGen<'a> { !self.control_stack.is_empty() } - /// Introduce additional local variables to this function. - /// - /// Calling this after [`emit_head`](Self::emit_head) has been invoked is non-sensical. - pub(crate) fn feed_local(&mut self, local_count: u32, local_type: WpType) { - // FIXME: somehow verify that we haven't invoked `emit_head` yet? Doing so could lead us to - // generate code that accesses the stack buffer out of bounds. - self.local_types - .push(local_count, local_type) - .expect("module cannot have more than u32::MAX locals"); - } - - /// Total number of locals and arguments so far. - /// - /// More can be introduced with the [`feed_local`](Self::feed_local) method. - pub(crate) fn local_count(&self) -> u32 { - *self.local_types.size() - } - - /// Obtain the type of the local or argument at the specified index. - /// - /// # Panics - /// - /// Note that this will panic if `index` is out of bounds, which can happen if an - /// implementation error has occurred or if the WASM module hasn't been validated to conform to - /// the web assembly specification. - pub(crate) fn local_type(&self, index: u32) -> WpType { - *self - .local_types - .find(index) - .expect("local index out of bounds") - } - - /// Consume offset self.src_loc, return Some(cost) iff there must be an instrumentation point here - fn consume_gas_offset(&mut self /*, should_be_unreachable: bool */) -> Option { - if let Some(&(&offset, &cost /* (&cost, &kind) */)) = self.gas_iter.peek() { - if offset == self.src_loc as usize { - // assert!(matches!(kind, InstrumentationKind::Unreachable) == should_be_unreachable, "gas computation results are not of the expected reachability: kind is {:?}, expected reachability is {:?}", kind, !should_be_unreachable); - self.gas_iter.next().unwrap(); - return Some(cost); - } - } - None - } - - #[tracing::instrument(skip(self))] pub(crate) fn feed_operator(&mut self, op: Operator) -> Result<(), CodegenError> { assert!(self.fp_stack.len() <= self.value_stack.len()); - let was_unreachable = self.unreachable_depth > 0; + let was_unreachable; + + if self.unreachable_depth > 0 { + was_unreachable = true; - if was_unreachable { match op { Operator::Block { .. } | Operator::Loop { .. } | Operator::If { .. 
} => { self.unreachable_depth += 1; @@ -2010,13 +2010,10 @@ impl<'a> FuncGen<'a> { _ => {} } if self.unreachable_depth > 0 { - self.consume_gas_offset(/* true */); // do not instrument unreachable code return Ok(()); } - } - - if let Some(cost) = self.consume_gas_offset(/* false */) { - self.emit_gas_const(cost); + } else { + was_unreachable = false; } match op { @@ -2116,47 +2113,47 @@ impl<'a> FuncGen<'a> { self.machine.release_temp_gpr(tmp); } Operator::LocalGet { local_index } => { - let local_type = self.local_type(local_index); + let local_index = local_index as usize; let ret = self.machine .acquire_locations(&mut self.assembler, &[(WpType::I64)], false)[0]; self.emit_relaxed_binop( Assembler::emit_mov, Size::S64, - self.machine.get_local_location(local_index), + self.locals[local_index], ret, ); self.value_stack.push(ret); - if local_type.is_float() { + if self.local_types[local_index].is_float() { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); } } Operator::LocalSet { local_index } => { + let local_index = local_index as usize; let loc = self.pop_value_released(); - let local_type = self.local_type(local_index); - if local_type.is_float() { + if self.local_types[local_index].is_float() { let fp = self.fp_stack.pop1()?; if self.assembler.arch_supports_canonicalize_nan() && self.config.enable_nan_canonicalization && fp.canonicalization.is_some() { self.canonicalize_nan( - match local_type { + match self.local_types[local_index] { WpType::F32 => Size::S32, WpType::F64 => Size::S64, _ => unreachable!(), }, loc, - self.machine.get_local_location(local_index), + self.locals[local_index], ); } else { self.emit_relaxed_binop( Assembler::emit_mov, Size::S64, loc, - self.machine.get_local_location(local_index), + self.locals[local_index], ); } } else { @@ -2164,34 +2161,35 @@ impl<'a> FuncGen<'a> { Assembler::emit_mov, Size::S64, loc, - self.machine.get_local_location(local_index), + self.locals[local_index], ); } } Operator::LocalTee { local_index } => { + let local_index = local_index as usize; let loc = *self.value_stack.last().unwrap(); - let local_type = self.local_type(local_index); - if local_type.is_float() { + + if self.local_types[local_index].is_float() { let fp = self.fp_stack.peek1()?; if self.assembler.arch_supports_canonicalize_nan() && self.config.enable_nan_canonicalization && fp.canonicalization.is_some() { self.canonicalize_nan( - match local_type { + match self.local_types[local_index] { WpType::F32 => Size::S32, WpType::F64 => Size::S64, _ => unreachable!(), }, loc, - self.machine.get_local_location(local_index), + self.locals[local_index], ); } else { self.emit_relaxed_binop( Assembler::emit_mov, Size::S64, loc, - self.machine.get_local_location(local_index), + self.locals[local_index], ); } } else { @@ -2199,7 +2197,7 @@ impl<'a> FuncGen<'a> { Assembler::emit_mov, Size::S64, loc, - self.machine.get_local_location(local_index), + self.locals[local_index], ); } } @@ -2320,7 +2318,7 @@ impl<'a> FuncGen<'a> { } }; - if self.assembler.arch_has_xzcnt(self.target.cpu_features()) { + if self.assembler.arch_has_xzcnt() { self.assembler.arch_emit_lzcnt( Size::S32, Location::GPR(src), @@ -2385,7 +2383,7 @@ impl<'a> FuncGen<'a> { } }; - if self.assembler.arch_has_xzcnt(self.target.cpu_features()) { + if self.assembler.arch_has_xzcnt() { self.assembler.arch_emit_tzcnt( Size::S32, Location::GPR(src), @@ -2551,7 +2549,7 @@ impl<'a> FuncGen<'a> { } }; - if self.assembler.arch_has_xzcnt(self.target.cpu_features()) { + if self.assembler.arch_has_xzcnt() { 
self.assembler.arch_emit_lzcnt( Size::S64, Location::GPR(src), @@ -2616,7 +2614,7 @@ impl<'a> FuncGen<'a> { } }; - if self.assembler.arch_has_xzcnt(self.target.cpu_features()) { + if self.assembler.arch_has_xzcnt() { self.assembler.arch_emit_tzcnt( Size::S64, Location::GPR(src), @@ -5141,18 +5139,12 @@ impl<'a> FuncGen<'a> { } } - Operator::Call { function_index } => { - self.emit_call(FunctionIndex::from_u32(function_index))? - } - Operator::CallIndirect { - type_index, - table_index, - table_byte: _, - } => { + Operator::Call { function_index } => self.emit_call(function_index)?, + Operator::CallIndirect { index, table_index } => { // TODO: removed restriction on always being table idx 0; // does any code depend on this? let table_index = TableIndex::new(table_index as _); - let index = SignatureIndex::new(type_index as usize); + let index = SignatureIndex::new(index as usize); let sig = self.module.signatures.get(index).unwrap(); let param_types: SmallVec<[WpType; 8]> = sig.params().iter().cloned().map(type_to_wp_type).collect(); @@ -5345,20 +5337,20 @@ impl<'a> FuncGen<'a> { } } } - Operator::If { blockty } => { + Operator::If { ty } => { let label_end = self.assembler.get_label(); let label_else = self.assembler.get_label(); let cond = self.pop_value_released(); let frame = ControlFrame { - br_label: label_end, + label: label_end, loop_like: false, if_else: IfElseState::If(label_else), - returns: match blockty { - WpBlockType::Empty => smallvec![], - WpBlockType::Type(inner_ty) => smallvec![inner_ty], - WpBlockType::FuncType(_) => { + returns: match ty { + WpTypeOrFuncType::Type(WpType::EmptyBlockType) => smallvec![], + WpTypeOrFuncType::Type(inner_ty) => smallvec![inner_ty], + _ => { return Err(CodegenError { message: "If: multi-value returns not yet implemented".to_string(), }) @@ -5410,6 +5402,8 @@ impl<'a> FuncGen<'a> { } } + self.update_max_stack_depth(); + let mut frame = self.control_stack.last_mut().unwrap(); let released: &[Location] = &self.value_stack[frame.value_stack_depth..]; @@ -5420,7 +5414,7 @@ impl<'a> FuncGen<'a> { match frame.if_else { IfElseState::If(label) => { - self.assembler.emit_jmp(Condition::None, frame.br_label); + self.assembler.emit_jmp(Condition::None, frame.label); self.assembler.emit_label(label); frame.if_else = IfElseState::Else; } @@ -5489,15 +5483,15 @@ impl<'a> FuncGen<'a> { } self.assembler.emit_label(end_label); } - Operator::Block { blockty } => { + Operator::Block { ty } => { let frame = ControlFrame { - br_label: self.assembler.get_label(), + label: self.assembler.get_label(), loop_like: false, if_else: IfElseState::None, - returns: match blockty { - WpBlockType::Empty => smallvec![], - WpBlockType::Type(inner_ty) => smallvec![inner_ty], - WpBlockType::FuncType(_) => { + returns: match ty { + WpTypeOrFuncType::Type(WpType::EmptyBlockType) => smallvec![], + WpTypeOrFuncType::Type(inner_ty) => smallvec![inner_ty], + _ => { return Err(CodegenError { message: "Block: multi-value returns not yet implemented" .to_string(), @@ -5509,7 +5503,7 @@ impl<'a> FuncGen<'a> { }; self.control_stack.push(frame); } - Operator::Loop { blockty } => { + Operator::Loop { ty } => { // Pad with NOPs to the next 16-byte boundary. // Here we don't use the dynasm `.align 16` attribute because it pads the alignment with single-byte nops // which may lead to efficiency problems. 
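// Editor's sketch (hedged) of the alignment math behind the NOP padding
// described above: the loop header is moved to the next 16-byte boundary with
// multi-byte NOPs, since (per the comment) a run of single-byte nops can
// decode poorly.
fn pad_to_16(offset: usize) -> usize {
    (16 - offset % 16) % 16 // NOP bytes to emit; afterwards offset % 16 == 0
}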
@@ -5521,17 +5515,17 @@ impl<'a> FuncGen<'a> { } assert_eq!(self.assembler.get_offset().0 % 16, 0); - let br_label = self.assembler.get_label(); + let label = self.assembler.get_label(); let _activate_offset = self.assembler.get_offset().0; self.control_stack.push(ControlFrame { - br_label, + label, loop_like: true, if_else: IfElseState::None, - returns: match blockty { - WpBlockType::Empty => smallvec![], - WpBlockType::Type(inner_ty) => smallvec![inner_ty], - WpBlockType::FuncType(_) => { + returns: match ty { + WpTypeOrFuncType::Type(WpType::EmptyBlockType) => smallvec![], + WpTypeOrFuncType::Type(inner_ty) => smallvec![inner_ty], + _ => { return Err(CodegenError { message: "Loop: multi-value returns not yet implemented" .to_string(), @@ -5541,7 +5535,7 @@ impl<'a> FuncGen<'a> { value_stack_depth: self.value_stack.len(), fp_stack_depth: self.fp_stack.len(), }); - self.assembler.emit_label(br_label); + self.assembler.emit_label(label); // TODO: Re-enable interrupt signal check without branching } @@ -5576,7 +5570,7 @@ impl<'a> FuncGen<'a> { self.assembler .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret); } - Operator::MemoryInit { data_index, mem } => { + Operator::MemoryInit { segment, mem } => { let len = self.value_stack.pop().unwrap(); let src = self.value_stack.pop().unwrap(); let dst = self.value_stack.pop().unwrap(); @@ -5600,7 +5594,7 @@ impl<'a> FuncGen<'a> { // [vmctx, memory_index, segment_index, dst, src, len] [ Location::Imm32(mem), - Location::Imm32(data_index), + Location::Imm32(segment), dst, src, len, @@ -5611,7 +5605,7 @@ impl<'a> FuncGen<'a> { self.machine .release_locations_only_stack(&mut self.assembler, &[dst, src, len]); } - Operator::DataDrop { data_index } => { + Operator::DataDrop { segment } => { self.assembler.emit_mov( Size::S64, Location::Memory( @@ -5628,19 +5622,19 @@ impl<'a> FuncGen<'a> { this.assembler.emit_call_register(GPR::RAX); }, // [vmctx, segment_index] - iter::once(Location::Imm32(data_index)), + iter::once(Location::Imm32(segment)), )?; } - Operator::MemoryCopy { src_mem, dst_mem } => { + Operator::MemoryCopy { src, dst } => { // ignore until we support multiple memories - let _dst = dst_mem; + let _dst = dst; let len = self.value_stack.pop().unwrap(); let src_pos = self.value_stack.pop().unwrap(); let dst_pos = self.value_stack.pop().unwrap(); self.machine .release_locations_only_regs(&[len, src_pos, dst_pos]); - let memory_index = MemoryIndex::new(src_mem as usize); + let memory_index = MemoryIndex::new(src as usize); let (memory_copy_index, memory_index) = if self.module.local_memory_index(memory_index).is_some() { ( @@ -6224,7 +6218,7 @@ impl<'a> FuncGen<'a> { let released = &self.value_stack[frame.value_stack_depth..]; self.machine .release_locations_keep_state(&mut self.assembler, released); - self.assembler.emit_jmp(Condition::None, frame.br_label); + self.assembler.emit_jmp(Condition::None, frame.label); self.unreachable_depth = 1; } Operator::Br { relative_depth } => { @@ -6273,7 +6267,7 @@ impl<'a> FuncGen<'a> { let released = &self.value_stack[frame.value_stack_depth..]; self.machine .release_locations_keep_state(&mut self.assembler, released); - self.assembler.emit_jmp(Condition::None, frame.br_label); + self.assembler.emit_jmp(Condition::None, frame.label); self.unreachable_depth = 1; } Operator::BrIf { relative_depth } => { @@ -6326,18 +6320,18 @@ impl<'a> FuncGen<'a> { let released = &self.value_stack[frame.value_stack_depth..]; self.machine .release_locations_keep_state(&mut self.assembler, released); - 
self.assembler.emit_jmp(Condition::None, frame.br_label); + self.assembler.emit_jmp(Condition::None, frame.label); self.assembler.emit_label(after); } - Operator::BrTable { ref targets } => { - let default_target = targets.default(); - let targets = targets + Operator::BrTable { ref table } => { + let mut targets = table .targets() .collect::, _>>() .map_err(|e| CodegenError { message: format!("BrTable read_table: {:?}", e), })?; + let default_target = targets.pop().unwrap().0; let cond = self.pop_value_released(); let table_label = self.assembler.get_label(); let mut table: Vec = vec![]; @@ -6365,7 +6359,7 @@ impl<'a> FuncGen<'a> { ); self.assembler.emit_jmp_location(Location::GPR(GPR::RDX)); - for target in targets.iter() { + for (target, _) in targets.iter() { let label = self.assembler.get_label(); self.assembler.emit_label(label); table.push(label); @@ -6416,7 +6410,7 @@ impl<'a> FuncGen<'a> { let released = &self.value_stack[frame.value_stack_depth..]; self.machine .release_locations_keep_state(&mut self.assembler, released); - self.assembler.emit_jmp(Condition::None, frame.br_label); + self.assembler.emit_jmp(Condition::None, frame.label); } self.assembler.emit_label(default_br); @@ -6465,7 +6459,7 @@ impl<'a> FuncGen<'a> { let released = &self.value_stack[frame.value_stack_depth..]; self.machine .release_locations_keep_state(&mut self.assembler, released); - self.assembler.emit_jmp(Condition::None, frame.br_label); + self.assembler.emit_jmp(Condition::None, frame.label); } self.assembler.emit_label(table_label); @@ -6521,24 +6515,13 @@ impl<'a> FuncGen<'a> { } if self.control_stack.is_empty() { - self.assembler.emit_label(frame.br_label); - let local_count = self.local_count(); - self.machine.finalize_locals(&mut self.assembler); - - // Restore stack height - self.assembler.emit_add( - Size::S32, - Location::Imm32(self.stack_size), - Location::Memory( - Machine::get_vmctx_reg(), - self.vmoffsets.vmctx_stack_limit_begin() as i32, - ), - ); - - self.machine.restore_registers( + self.assembler.emit_label(frame.label); + self.update_max_stack_depth(); + self.emit_function_stack_check(false); + self.machine.finalize_locals( &mut self.assembler, + &self.locals, self.calling_convention, - local_count, ); self.assembler.emit_mov( Size::S64, @@ -6563,11 +6546,12 @@ impl<'a> FuncGen<'a> { let released = &self.value_stack[frame.value_stack_depth..]; self.machine .release_locations(&mut self.assembler, released); + self.update_max_stack_depth(); self.value_stack.truncate(frame.value_stack_depth); self.fp_stack.truncate(frame.fp_stack_depth); if !frame.loop_like { - self.assembler.emit_label(frame.br_label); + self.assembler.emit_label(frame.label); } if let IfElseState::If(label) = frame.if_else { @@ -6596,7 +6580,7 @@ impl<'a> FuncGen<'a> { } } } - Operator::AtomicFence => { + Operator::AtomicFence { flags: _ } => { // Fence is a nop. 
// // Fence was added to preserve information about fences from @@ -8322,7 +8306,7 @@ impl<'a> FuncGen<'a> { self.machine .release_locations_only_stack(&mut self.assembler, &[dest, val, len]); } - Operator::TableInit { elem_index, table } => { + Operator::TableInit { segment, table } => { let len = self.value_stack.pop().unwrap(); let src = self.value_stack.pop().unwrap(); let dest = self.value_stack.pop().unwrap(); @@ -8347,7 +8331,7 @@ impl<'a> FuncGen<'a> { // [vmctx, table_index, elem_index, dst, src, len] [ Location::Imm32(table), - Location::Imm32(elem_index), + Location::Imm32(segment), dest, src, len, @@ -8359,7 +8343,7 @@ impl<'a> FuncGen<'a> { self.machine .release_locations_only_stack(&mut self.assembler, &[dest, src, len]); } - Operator::ElemDrop { elem_index } => { + Operator::ElemDrop { segment } => { self.assembler.emit_mov( Size::S64, Location::Memory( @@ -8376,7 +8360,7 @@ impl<'a> FuncGen<'a> { this.assembler.emit_call_register(GPR::RAX); }, // [vmctx, elem_index] - [Location::Imm32(elem_index)].iter().cloned(), + [Location::Imm32(segment)].iter().cloned(), )?; } _ => { @@ -8389,13 +8373,7 @@ impl<'a> FuncGen<'a> { Ok(()) } - #[tracing::instrument(skip_all)] pub(crate) fn finalize(mut self, data: &FunctionBodyData) -> CompiledFunction { - debug_assert!( - self.gas_iter.next().is_none(), - "finalizing function but not all instrumentation points were inserted" - ); - // Generate actual code for special labels. self.assembler .emit_label(self.special_labels.integer_division_by_zero); @@ -8438,12 +8416,10 @@ impl<'a> FuncGen<'a> { let body_len = self.assembler.get_offset().0; let instructions_address_map = self.instructions_address_map; let address_map = get_function_address_map(instructions_address_map, data, body_len); - let mut body = self.assembler.finalize().unwrap(); - body.shrink_to_fit(); CompiledFunction { body: FunctionBody { - body, + body: self.assembler.finalize().unwrap().to_vec(), unwind_info: None, }, relocations: self.relocations, @@ -8518,7 +8494,6 @@ fn sort_call_movs(movs: &mut [(Location, GPR)]) { } // Standard entry trampoline. -#[tracing::instrument] pub(crate) fn gen_std_trampoline( sig: &FunctionType, calling_convention: CallingConvention, @@ -8621,16 +8596,13 @@ pub(crate) fn gen_std_trampoline( a.emit_ret(); - let mut body = a.finalize().unwrap(); - body.shrink_to_fit(); FunctionBody { - body, + body: a.finalize().unwrap().to_vec(), unwind_info: None, } } /// Generates dynamic import function call trampoline for a function type. -#[tracing::instrument(skip(vmoffsets))] pub(crate) fn gen_std_dynamic_import_trampoline( vmoffsets: &VMOffsets, sig: &FunctionType, @@ -8745,16 +8717,13 @@ pub(crate) fn gen_std_dynamic_import_trampoline( // Return. a.emit_ret(); - let mut body = a.finalize().unwrap(); - body.shrink_to_fit(); FunctionBody { - body, + body: a.finalize().unwrap().to_vec(), unwind_info: None, } } // Singlepass calls import functions through a trampoline. 
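// Editor's sketch (hedged; the types here are illustrative, not wasmer's API)
// of the indirection this comment describes: a direct call is always emitted
// as `mov rax, imm64; call rax`, and the JIT linker patches the imm64 to
// either the local function body or, for an import, the per-import trampoline
// section that forwards into the host function, so one relocation shape
// covers both cases.
enum CallTarget {
    LocalFunc(usize),        // RelocationTarget::LocalFunc
    ImportTrampoline(usize), // RelocationTarget::CustomSection
}
fn patched_address(target: &CallTarget, local_bodies: &[usize], trampolines: &[usize]) -> usize {
    match *target {
        CallTarget::LocalFunc(i) => local_bodies[i],
        CallTarget::ImportTrampoline(i) => trampolines[i],
    }
}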
-#[tracing::instrument(skip(vmoffsets))] pub(crate) fn gen_import_call_trampoline( vmoffsets: &VMOffsets, index: FunctionIndex, @@ -8909,9 +8878,7 @@ pub(crate) fn gen_import_call_trampoline( } a.emit_host_redirection(GPR::RAX); - let mut contents = a.finalize().unwrap(); - contents.shrink_to_fit(); - let section_body = SectionBody::new_with_vec(contents); + let section_body = SectionBody::new_with_vec(a.finalize().unwrap().to_vec()); CustomSection { protection: CustomSectionProtection::ReadExecute, diff --git a/lib/compiler-singlepass/src/compiler.rs b/lib/compiler-singlepass/src/compiler.rs index 0e35080415..16309acdfe 100644 --- a/lib/compiler-singlepass/src/compiler.rs +++ b/lib/compiler-singlepass/src/compiler.rs @@ -42,15 +42,12 @@ impl SinglepassCompiler { impl Compiler for SinglepassCompiler { /// Compile the module using Singlepass, producing a compilation result with /// associated relocations. - #[tracing::instrument(skip_all)] fn compile_module( &self, target: &Target, compile_info: &CompileModuleInfo, module_translation: &ModuleTranslationState, function_body_inputs: PrimaryMap>, - tunables: &dyn wasmer_vm::Tunables, - instrumentation: &finite_wasm::AnalysisOutcome, ) -> Result { /*if target.triple().operating_system == OperatingSystem::Windows { return Err(CompileError::UnsupportedTarget( @@ -88,125 +85,92 @@ impl Compiler for SinglepassCompiler { .bytes(); let vmoffsets = VMOffsets::new(pointer_width).with_module_info(&module); let import_idxs = 0..module.import_counts.functions as usize; - let import_trampolines: PrimaryMap = - tracing::info_span!("import_trampolines", n_imports = import_idxs.len()).in_scope( - || { - import_idxs - .into_par_iter_if_rayon() - .map(|i| { - let i = FunctionIndex::new(i); - gen_import_call_trampoline( - &vmoffsets, - i, - &module.signatures[module.functions[i]], - calling_convention, - ) - }) - .collect::>() - .into_iter() - .collect() - }, - ); + let import_trampolines: PrimaryMap = import_idxs + .into_par_iter_if_rayon() + .map(|i| { + let i = FunctionIndex::new(i); + gen_import_call_trampoline( + &vmoffsets, + i, + &module.signatures[module.functions[i]], + calling_convention, + ) + }) + .collect::>() + .into_iter() + .collect(); let functions = function_body_inputs .iter() .collect::)>>() .into_par_iter_if_rayon() .map(|(i, input)| { - tracing::info_span!("function", i = i.index()).in_scope(|| { - let reader = - wasmer_compiler::FunctionReader::new(input.module_offset, input.data); - let stack_init_gas_cost = tunables - .stack_init_gas_cost(instrumentation.function_frame_sizes[i.index()]); - let stack_size = instrumentation.function_frame_sizes[i.index()] - .checked_add(instrumentation.function_operand_stack_sizes[i.index()]) - .ok_or_else(|| { - CompileError::Codegen(String::from( - "got function with frame size going beyond u64::MAX", - )) - })?; - let mut generator = FuncGen::new( - module, - module_translation, - &self.config, - &target, - &vmoffsets, - &table_styles, - i, - calling_convention, - stack_init_gas_cost, - &instrumentation.gas_offsets[i.index()], - &instrumentation.gas_costs[i.index()], - &instrumentation.gas_kinds[i.index()], - stack_size, - ) - .map_err(to_compile_error)?; + let reader = wasmer_compiler::FunctionReader::new(input.module_offset, input.data); - let mut local_reader = reader.get_locals_reader()?; - for _ in 0..local_reader.get_count() { - let (count, ty) = local_reader.read()?; - // Overflows feeding a local here have most likely already been caught by the - // validator, but it is possible that the 
validator hasn't been run at all, or - // that the validator does not impose any limits on the number of locals. - generator.feed_local(count, ty); + let mut local_reader = reader.get_locals_reader()?; + // This local list excludes arguments. + let mut locals = vec![]; + let num_locals = local_reader.get_count(); + for _ in 0..num_locals { + let (count, ty) = local_reader.read()?; + for _ in 0..count { + locals.push(ty); } + } - generator.emit_head().map_err(to_compile_error)?; + let mut generator = FuncGen::new( + module, + module_translation, + &self.config, + &vmoffsets, + &table_styles, + i, + &locals, + calling_convention, + ) + .map_err(to_compile_error)?; - let mut operator_reader = - reader.get_operators_reader()?.into_iter_with_offsets(); - while generator.has_control_frames() { - let (op, pos) = tracing::info_span!("parsing-next-operator") - .in_scope(|| operator_reader.next().unwrap())?; - generator.set_srcloc(pos as u32); - generator.feed_operator(op).map_err(to_compile_error)?; - } + let mut operator_reader = reader.get_operators_reader()?.into_iter_with_offsets(); + while generator.has_control_frames() { + let (op, pos) = operator_reader.next().unwrap()?; + generator.set_srcloc(pos as u32); + generator.feed_operator(op).map_err(to_compile_error)?; + } - Ok(generator.finalize(&input)) - }) + Ok(generator.finalize(&input)) }) .collect::, CompileError>>()? - .into_iter() // TODO: why not just collect to PrimaryMap directly? + .into_iter() .collect::>(); - let function_call_trampolines = - tracing::info_span!("function_call_trampolines").in_scope(|| { - module - .signatures - .values() - .collect::>() - .into_par_iter_if_rayon() - .map(|func_type| gen_std_trampoline(&func_type, calling_convention)) - .collect::>() - .into_iter() - .collect::>() - }); + let function_call_trampolines = module + .signatures + .values() + .collect::>() + .into_par_iter_if_rayon() + .map(|func_type| gen_std_trampoline(&func_type, calling_convention)) + .collect::>() + .into_iter() + .collect::>(); - let dynamic_function_trampolines = tracing::info_span!("dynamic_function_trampolines") - .in_scope(|| { - module - .imported_function_types() - .collect::>() - .into_par_iter_if_rayon() - .map(|func_type| { - gen_std_dynamic_import_trampoline( - &vmoffsets, - &func_type, - calling_convention, - ) - }) - .collect::>() - .into_iter() - .collect::>() - }); + let dynamic_function_trampolines = module + .imported_function_types() + .collect::>() + .into_par_iter_if_rayon() + .map(|func_type| { + gen_std_dynamic_import_trampoline(&vmoffsets, &func_type, calling_convention) + }) + .collect::>() + .into_iter() + .collect::>(); - Ok(Compilation { + Ok(Compilation::new( functions, - custom_sections: import_trampolines, + import_trampolines, function_call_trampolines, dynamic_function_trampolines, - debug: None, - trampolines: None, - }) + None, + None, + )) } } @@ -257,7 +221,6 @@ mod tests { CompileModuleInfo, ModuleTranslationState, PrimaryMap>, - finite_wasm::AnalysisOutcome, ) { let compile_info = CompileModuleInfo { features: Features::new(), @@ -267,19 +230,7 @@ mod tests { }; let module_translation = ModuleTranslationState::new(); let function_body_inputs = PrimaryMap::>::new(); - let analysis = finite_wasm::AnalysisOutcome { - function_frame_sizes: Vec::new(), - function_operand_stack_sizes: Vec::new(), - gas_offsets: Vec::new(), - gas_costs: Vec::new(), - gas_kinds: Vec::new(), - }; - ( - compile_info, - module_translation, - function_body_inputs, - analysis, - ) + (compile_info, module_translation, 
function_body_inputs) } #[test] @@ -297,15 +248,8 @@ mod tests { // Compile for 32bit Linux let linux32 = Target::new(triple!("i686-unknown-linux-gnu"), CpuFeature::for_host()); - let (mut info, translation, inputs, analysis) = dummy_compilation_ingredients(); - let result = compiler.compile_module( - &linux32, - &mut info, - &translation, - inputs, - &wasmer_vm::TestTunables, - &analysis, - ); + let (mut info, translation, inputs) = dummy_compilation_ingredients(); + let result = compiler.compile_module(&linux32, &mut info, &translation, inputs); match result.unwrap_err() { CompileError::UnsupportedTarget(name) => assert_eq!(name, "i686"), error => panic!("Unexpected error: {:?}", error), @@ -313,15 +257,8 @@ mod tests { // Compile for win32 let win32 = Target::new(triple!("i686-pc-windows-gnu"), CpuFeature::for_host()); - let (mut info, translation, inputs, analysis) = dummy_compilation_ingredients(); - let result = compiler.compile_module( - &win32, - &mut info, - &translation, - inputs, - &wasmer_vm::TestTunables, - &analysis, - ); + let (mut info, translation, inputs) = dummy_compilation_ingredients(); + let result = compiler.compile_module(&win32, &mut info, &translation, inputs); match result.unwrap_err() { CompileError::UnsupportedTarget(name) => assert_eq!(name, "i686"), // Windows should be checked before architecture error => panic!("Unexpected error: {:?}", error), diff --git a/lib/compiler-singlepass/src/emitter_x64.rs b/lib/compiler-singlepass/src/emitter_x64.rs index 79bd18a5cd..062e241e32 100644 --- a/lib/compiler-singlepass/src/emitter_x64.rs +++ b/lib/compiler-singlepass/src/emitter_x64.rs @@ -3,8 +3,6 @@ use dynasm::dynasm; use dynasmrt::{ x64::X64Relocation, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, VecAssembler, }; -use enumset::EnumSet; -use wasmer_compiler::CpuFeature; type Assembler = VecAssembler; @@ -34,7 +32,6 @@ pub(crate) enum Location { MemoryAddTriple(GPR, GPR, i32), } -#[allow(dead_code)] #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub(crate) enum Condition { None, @@ -290,7 +287,7 @@ pub(crate) trait Emitter { unimplemented!() } - fn arch_has_xzcnt(&self, _cpu: &EnumSet) -> bool { + fn arch_has_xzcnt(&self) -> bool { false } fn arch_emit_lzcnt(&mut self, _sz: Size, _src: Location, _dst: Location) { @@ -1422,26 +1419,6 @@ impl Emitter for Assembler { self.emit_jmp_location(Location::GPR(target)); } - fn arch_has_xzcnt(&self, cpu: &EnumSet) -> bool { - cpu.contains(CpuFeature::BMI1) && cpu.contains(CpuFeature::LZCNT) - } - - fn arch_emit_lzcnt(&mut self, sz: Size, src: Location, dst: Location) { - binop_gpr_gpr!(lzcnt, self, sz, src, dst, { - binop_mem_gpr!(lzcnt, self, sz, src, dst, { - panic!("singlepass can't emit LZCNT {:?} {:?} {:?}", sz, src, dst) - }) - }) - } - - fn arch_emit_tzcnt(&mut self, sz: Size, src: Location, dst: Location) { - binop_gpr_gpr!(tzcnt, self, sz, src, dst, { - binop_mem_gpr!(tzcnt, self, sz, src, dst, { - panic!("singlepass can't emit TZCNT {:?} {:?} {:?}", sz, src, dst) - }) - }) - } - fn arch_mov64_imm_offset(&self) -> usize { 2 } diff --git a/lib/compiler-singlepass/src/machine.rs b/lib/compiler-singlepass/src/machine.rs index 4f112f5d62..6cf51d0ca3 100644 --- a/lib/compiler-singlepass/src/machine.rs +++ b/lib/compiler-singlepass/src/machine.rs @@ -1,48 +1,29 @@ use crate::emitter_x64::*; use smallvec::smallvec; use smallvec::SmallVec; -use std::convert::TryFrom; -use wasmer_compiler::wasmparser::ValType as WpType; +use std::cmp; +use std::collections::HashSet; +use wasmer_compiler::wasmparser::Type as WpType; 
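The rewritten compiler.rs above replaces `feed_local(count, ty)` with an eagerly expanded locals list: WebAssembly encodes local declarations as run-length `(count, type)` pairs, and the new code flattens them before constructing `FuncGen`. A small sketch of that expansion over a placeholder type enum (hypothetical names standing in for wasmparser's locals reader):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum ValType { I32, I64, F32, F64 }

/// Flatten run-length encoded local declarations, as the compiler does
/// after reading them via the locals reader.
fn expand_locals(pairs: &[(u32, ValType)]) -> Vec<ValType> {
    let mut locals = Vec::new();
    for &(count, ty) in pairs {
        // Each pair declares `count` consecutive locals of the same type.
        for _ in 0..count {
            locals.push(ty);
        }
    }
    locals
}

fn main() {
    let locals = expand_locals(&[(2, ValType::I32), (1, ValType::F64)]);
    assert_eq!(locals, vec![ValType::I32, ValType::I32, ValType::F64]);
}
```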
use wasmer_compiler::CallingConvention; const NATIVE_PAGE_SIZE: usize = 4096; -#[derive(Clone, Copy)] struct MachineStackOffset(usize); pub(crate) struct Machine { - used_gprs: u32, // Bitset for the used GPRs, 1 means used - used_xmms: u32, // Bitset for the used XMMs, 1 means used + used_gprs: HashSet, + used_xmms: HashSet, stack_offset: MachineStackOffset, save_area_offset: Option, - /// Memory location at which local variables begin. - /// - /// Populated in `init_locals`. - locals_offset: MachineStackOffset, -} - -/// Returns an u32 that has as 1 bits the ones matching registers passed as parameters -macro_rules! bitset_of_regs { - ($( $r:expr ),*) => {{ - $( (1u32 << ($r as u32)) )|* - }} } -// Note: the below asserts are because we currently use u32 for used_gprs and used_xmms -// Feel free to increase the number in this assert by making them bigger if needed -#[allow(dead_code)] -const _GPRS_FIT_IN_U32: () = assert!(GPR::num_gprs() <= 32); -#[allow(dead_code)] -const _XMMS_FIT_IN_U32: () = assert!(XMM::num_xmms() <= 32); - impl Machine { pub(crate) fn new() -> Self { Machine { - used_gprs: 0, - used_xmms: 0, + used_gprs: HashSet::new(), + used_xmms: HashSet::new(), stack_offset: MachineStackOffset(0), save_area_offset: None, - locals_offset: MachineStackOffset(0), } } @@ -50,42 +31,34 @@ impl Machine { self.stack_offset.0 } - fn get_used_in(mut v: u32, to_return_type: impl Fn(u8) -> T) -> Vec { - let mut n = 0u8; - let mut res = Vec::with_capacity(v.count_ones() as usize); - while v != 0 { - n += v.trailing_zeros() as u8; - res.push(to_return_type(n)); - v >>= v.trailing_zeros() + 1; - n += 1; - } - res - } - pub(crate) fn get_used_gprs(&self) -> Vec { - Self::get_used_in(self.used_gprs, |r| GPR::from_repr(r).unwrap()) + let mut result = self.used_gprs.iter().cloned().collect::>(); + result.sort_unstable(); + result } pub(crate) fn get_used_xmms(&self) -> Vec { - Self::get_used_in(self.used_xmms, |r| XMM::from_repr(r).unwrap()) + let mut result = self.used_xmms.iter().cloned().collect::>(); + result.sort_unstable(); + result } pub(crate) fn get_vmctx_reg() -> GPR { GPR::R15 } - fn pick_one_in(v: u32) -> Option { - let r = v.trailing_zeros() as u8; - (r != 32).then_some(r) - } - /// Picks an unused general purpose register for local/stack/argument use. /// /// This method does not mark the register as used. pub(crate) fn pick_gpr(&self) -> Option { use GPR::*; - const REGS: u32 = bitset_of_regs!(RSI, RDI, R8, R9, R10, R11); - Self::pick_one_in(!self.used_gprs & REGS).map(|r| GPR::from_repr(r).unwrap()) + static REGS: &[GPR] = &[RSI, RDI, R8, R9, R10, R11]; + for r in REGS { + if !self.used_gprs.contains(r) { + return Some(*r); + } + } + None } /// Picks an unused general purpose register for internal temporary use. @@ -93,61 +66,33 @@ impl Machine { /// This method does not mark the register as used. 
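The hunk above reverts the `u32` bitset register tracking back to `HashSet`s. The removed bitset scheme (`bitset_of_regs!`, `pick_one_in`, `get_used_in`) is compact enough to sketch in full; a self-contained model, assuming register numbers stay below 31 as the `const` asserts guaranteed:

```rust
/// Track register usage in a u32, one bit per register (bit n set == register n used).
#[derive(Default)]
struct RegSet(u32);

impl RegSet {
    fn set(&mut self, r: u8) { self.0 |= 1 << r; }
    fn clear(&mut self, r: u8) { self.0 &= !(1 << r); }
    fn contains(&self, r: u8) -> bool { self.0 & (1 << r) != 0 }

    /// Pick the lowest-numbered free register out of `allowed`,
    /// mirroring `pick_one_in(!self.used_gprs & REGS)`.
    fn pick_free(&self, allowed: u32) -> Option<u8> {
        let free = !self.0 & allowed;
        (free != 0).then(|| free.trailing_zeros() as u8)
    }

    /// Enumerate all used registers in ascending order, as `get_used_in` did.
    /// (The shift below stays in range while no register number exceeds 30.)
    fn iter_used(&self) -> Vec<u8> {
        let (mut v, mut n, mut out) = (self.0, 0u8, Vec::new());
        while v != 0 {
            n += v.trailing_zeros() as u8;
            out.push(n);
            v >>= v.trailing_zeros() + 1;
            n += 1;
        }
        out
    }
}

fn main() {
    let mut s = RegSet::default();
    s.set(3);
    s.set(5);
    assert_eq!(s.pick_free(0b1111), Some(0));
    assert_eq!(s.iter_used(), vec![3, 5]);
    s.clear(3);
    assert!(!s.contains(3));
}
```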
pub(crate) fn pick_temp_gpr(&self) -> Option { use GPR::*; - const REGS: u32 = bitset_of_regs!(RAX, RCX, RDX); - Self::pick_one_in(!self.used_gprs & REGS).map(|r| GPR::from_repr(r).unwrap()) - } - - fn get_gpr_used(&self, r: GPR) -> bool { - if 0 != (self.used_gprs & bitset_of_regs!(r)) { - true - } else { - false - } - } - - fn set_gpr_used(&mut self, r: GPR) { - self.used_gprs |= bitset_of_regs!(r); - } - - fn set_gpr_unused(&mut self, r: GPR) { - self.used_gprs &= !bitset_of_regs!(r); - } - - fn get_xmm_used(&self, r: XMM) -> bool { - if 0 != (self.used_xmms & bitset_of_regs!(r)) { - true - } else { - false + static REGS: &[GPR] = &[RAX, RCX, RDX]; + for r in REGS { + if !self.used_gprs.contains(r) { + return Some(*r); + } } - } - - fn set_xmm_used(&mut self, r: XMM) { - self.used_xmms |= bitset_of_regs!(r); - } - - fn set_xmm_unused(&mut self, r: XMM) { - self.used_xmms &= !bitset_of_regs!(r); + None } /// Acquires a temporary GPR. pub(crate) fn acquire_temp_gpr(&mut self) -> Option { let gpr = self.pick_temp_gpr(); if let Some(x) = gpr { - self.set_gpr_used(x); + self.used_gprs.insert(x); } gpr } /// Releases a temporary GPR. pub(crate) fn release_temp_gpr(&mut self, gpr: GPR) { - assert!(self.get_gpr_used(gpr)); - self.set_gpr_unused(gpr); + assert!(self.used_gprs.remove(&gpr)); } /// Specify that a given register is in use. pub(crate) fn reserve_unused_temp_gpr(&mut self, gpr: GPR) -> GPR { - assert!(!self.get_gpr_used(gpr)); - self.set_gpr_used(gpr); + assert!(!self.used_gprs.contains(&gpr)); + self.used_gprs.insert(gpr); gpr } @@ -156,8 +101,13 @@ impl Machine { /// This method does not mark the register as used. pub(crate) fn pick_xmm(&self) -> Option { use XMM::*; - const REGS: u32 = bitset_of_regs!(XMM3, XMM4, XMM5, XMM6, XMM7); - Self::pick_one_in(!self.used_xmms & REGS).map(|r| XMM::from_repr(r).unwrap()) + static REGS: &[XMM] = &[XMM3, XMM4, XMM5, XMM6, XMM7]; + for r in REGS { + if !self.used_xmms.contains(r) { + return Some(*r); + } + } + None } /// Picks an unused XMM register for internal temporary use. @@ -165,41 +115,27 @@ impl Machine { /// This method does not mark the register as used. pub(crate) fn pick_temp_xmm(&self) -> Option { use XMM::*; - const REGS: u32 = bitset_of_regs!(XMM0, XMM1, XMM2); - Self::pick_one_in(!self.used_xmms & REGS).map(|r| XMM::from_repr(r).unwrap()) + static REGS: &[XMM] = &[XMM0, XMM1, XMM2]; + for r in REGS { + if !self.used_xmms.contains(r) { + return Some(*r); + } + } + None } /// Acquires a temporary XMM register. pub(crate) fn acquire_temp_xmm(&mut self) -> Option { let xmm = self.pick_temp_xmm(); if let Some(x) = xmm { - self.set_xmm_used(x); + self.used_xmms.insert(x); } xmm } /// Releases a temporary XMM register. pub(crate) fn release_temp_xmm(&mut self, xmm: XMM) { - assert!(self.get_xmm_used(xmm)); - self.set_xmm_unused(xmm); - } - - fn increase_rsp(&mut self, a: &mut impl Emitter, sz: usize) { - a.emit_add( - Size::S64, - Location::Imm32(u32::try_from(sz).unwrap()), - Location::GPR(GPR::RSP), - ); - self.stack_offset.0 -= sz; - } - - fn decrease_rsp(&mut self, a: &mut impl Emitter, sz: usize) { - a.emit_sub( - Size::S64, - Location::Imm32(u32::try_from(sz).unwrap()), - Location::GPR(GPR::RSP), - ); - self.stack_offset.0 += sz; + assert_eq!(self.used_xmms.remove(&xmm), true); } /// Acquires locations from the machine state. 
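The removed `increase_rsp`/`decrease_rsp` helpers above pair every RSP adjustment with an update of `stack_offset`, so the machine model and the emitted code cannot drift apart. A toy model of that invariant, with the emitter faked as a log (names are illustrative):

```rust
/// Toy stand-in for the assembler: records the RSP adjustments it was asked to emit.
#[derive(Default)]
struct FakeEmitter { log: Vec<String> }

#[derive(Default)]
struct Machine { stack_offset: usize }

impl Machine {
    /// Mirror of `decrease_rsp`: growing the stack moves RSP down
    /// and the tracked offset up, in lockstep.
    fn decrease_rsp(&mut self, a: &mut FakeEmitter, sz: usize) {
        a.log.push(format!("sub rsp, {sz}"));
        self.stack_offset += sz;
    }

    /// Mirror of `increase_rsp`: releasing stack space undoes both.
    fn increase_rsp(&mut self, a: &mut FakeEmitter, sz: usize) {
        a.log.push(format!("add rsp, {sz}"));
        self.stack_offset -= sz;
    }
}

fn main() {
    let (mut m, mut a) = (Machine::default(), FakeEmitter::default());
    m.decrease_rsp(&mut a, 16);
    m.decrease_rsp(&mut a, 8);
    m.increase_rsp(&mut a, 24);
    // Every emitted adjustment is reflected in the model, and vice versa.
    assert_eq!(m.stack_offset, 0);
    assert_eq!(a.log, ["sub rsp, 16", "sub rsp, 8", "add rsp, 24"]);
}
```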
@@ -226,22 +162,24 @@ impl Machine { let loc = if let Some(x) = loc { x } else { + self.stack_offset.0 += 8; delta_stack_offset += 8; - Location::Memory( - GPR::RBP, - -((self.stack_offset.0 + delta_stack_offset) as i32), - ) + Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) }; if let Location::GPR(x) = loc { - self.set_gpr_used(x); + self.used_gprs.insert(x); } else if let Location::XMM(x) = loc { - self.set_xmm_used(x); + self.used_xmms.insert(x); } ret.push(loc); } if delta_stack_offset != 0 { - self.decrease_rsp(assembler, delta_stack_offset); + assembler.emit_sub( + Size::S64, + Location::Imm32(delta_stack_offset as u32), + Location::GPR(GPR::RSP), + ); } if zeroed { for i in 0..tys.len() { @@ -257,22 +195,21 @@ impl Machine { for loc in locs.iter().rev() { match *loc { - Location::GPR(x) => { - assert!(self.get_gpr_used(x)); - self.set_gpr_unused(x); + Location::GPR(ref x) => { + assert_eq!(self.used_gprs.remove(x), true); } - Location::XMM(x) => { - assert!(self.get_xmm_used(x)); - self.set_xmm_unused(x); + Location::XMM(ref x) => { + assert_eq!(self.used_xmms.remove(x), true); } Location::Memory(GPR::RBP, x) => { if x >= 0 { unreachable!(); } let offset = (-x) as usize; - if offset != self.stack_offset.0 - delta_stack_offset { + if offset != self.stack_offset.0 { unreachable!(); } + self.stack_offset.0 -= 8; delta_stack_offset += 8; } _ => {} @@ -280,20 +217,22 @@ impl Machine { } if delta_stack_offset != 0 { - self.increase_rsp(assembler, delta_stack_offset); + assembler.emit_add( + Size::S64, + Location::Imm32(delta_stack_offset as u32), + Location::GPR(GPR::RSP), + ); } } pub(crate) fn release_locations_only_regs(&mut self, locs: &[Location]) { for loc in locs.iter().rev() { match *loc { - Location::GPR(x) => { - assert!(self.get_gpr_used(x)); - self.set_gpr_unused(x); + Location::GPR(ref x) => { + assert_eq!(self.used_gprs.remove(x), true); } - Location::XMM(x) => { - assert!(self.get_xmm_used(x)); - self.set_xmm_unused(x); + Location::XMM(ref x) => { + assert_eq!(self.used_xmms.remove(x), true); } _ => {} } @@ -313,15 +252,20 @@ impl Machine { unreachable!(); } let offset = (-x) as usize; - if offset != self.stack_offset.0 - delta_stack_offset { + if offset != self.stack_offset.0 { unreachable!(); } + self.stack_offset.0 -= 8; delta_stack_offset += 8; } } if delta_stack_offset != 0 { - self.increase_rsp(assembler, delta_stack_offset); + assembler.emit_add( + Size::S64, + Location::Imm32(delta_stack_offset as u32), + Location::GPR(GPR::RSP), + ); } } @@ -331,6 +275,7 @@ impl Machine { locs: &[Location], ) { let mut delta_stack_offset: usize = 0; + let mut stack_offset = self.stack_offset.0; for loc in locs.iter().rev() { if let Location::Memory(GPR::RBP, x) = *loc { @@ -338,15 +283,15 @@ impl Machine { unreachable!(); } let offset = (-x) as usize; - if offset != self.stack_offset.0 - delta_stack_offset { + if offset != stack_offset { unreachable!(); } + stack_offset -= 8; delta_stack_offset += 8; } } if delta_stack_offset != 0 { - // DO NOT use increase_rsp, as we don’t want to change stack_offset assembler.emit_add( Size::S64, Location::Imm32(delta_stack_offset as u32), @@ -355,127 +300,128 @@ impl Machine { } } - const LOCAL_REGISTERS: &'static [GPR] = &[GPR::R12, GPR::R13, GPR::R14, GPR::RBX]; - - pub(crate) fn get_local_location(&self, idx: u32) -> Location { - // NB: This calculation cannot reasonably overflow. `self.locals_offset` will typically be - // small (< 32), and `idx` is bounded to `51000` due to limits imposed by the wasmparser - // validator. 
We introduce a debug_assert here to ensure that `idx` never really exceeds - // some incredibly large value. - debug_assert!( - idx <= 999_999, - "this runtime can't deal with unreasonable number of locals" - ); - Self::LOCAL_REGISTERS - .get(idx as usize) - .map(|r| Location::GPR(*r)) - .unwrap_or_else(|| { - let local_offset = idx - .checked_sub(Self::LOCAL_REGISTERS.len() as u32) - .unwrap() - .wrapping_mul(8); - Location::Memory( - GPR::RBP, - (local_offset.wrapping_add(self.locals_offset.0 as u32) as i32).wrapping_neg(), - ) - }) - } - - // `setup_registers`, `init_locals`, `finalize_locals` and `restore_registers` work together, - // the first two making up the function prologue (with a stack check and gas charge in-between), - // and the second two making up the function epilogue (with the stack height reset in-between). - // - // Together, they build the following stack, with `N = min(n, LOCAL_REGISTERS.len())`: - // +-------------------+--------+ - // | Return Pointer | 8B | - // | Saved RBP | 8B | <- RBP - // | LOCAL_REGISTERS 0 | 8B | - // | ... | | - // | LOCAL_REGISTERS N | 8B | - // | Saved R15 | 8B | - // | (Win FastC) RDI | 8B | - // | (Win FastC) RSI | 8B | <- save_area_offset - // | Local 0 | 8B | <- locals_offset - // | ... | | - // | Local n | 8B | <- RSP, stack_offset (at end of init_locals, will keep moving during fn codegen) - // +-------------------+--------+ - pub(crate) fn setup_registers( + pub(crate) fn init_locals( &mut self, a: &mut E, - n: u32, - n_params: u32, + n: usize, + n_params: usize, calling_convention: CallingConvention, - ) { + ) -> Vec { + // Determine whether a local should be allocated on the stack. + fn is_local_on_stack(idx: usize) -> bool { + idx > 3 + } + + // Determine a local's location. + fn get_local_location(idx: usize, callee_saved_regs_size: usize) -> Location { + // Use callee-saved registers for the first locals. + match idx { + 0 => Location::GPR(GPR::R12), + 1 => Location::GPR(GPR::R13), + 2 => Location::GPR(GPR::R14), + 3 => Location::GPR(GPR::RBX), + _ => Location::Memory(GPR::RBP, -(((idx - 3) * 8 + callee_saved_regs_size) as i32)), + } + } + + // How many machine stack slots will all the locals use? + let num_mem_slots = (0..n).filter(|&x| is_local_on_stack(x)).count(); + // Total size (in bytes) of the pre-allocated "static area" for this function's // locals and callee-saved registers. let mut static_area_size: usize = 0; - // Space to clobber registers used for locals. - static_area_size += 8 * std::cmp::min(Self::LOCAL_REGISTERS.len(), n as usize); + // Callee-saved registers used for locals. + // Keep this consistent with the "Save callee-saved registers" code below. + for i in 0..n { + // If a local is not stored on stack, then it is allocated to a callee-saved register. + if !is_local_on_stack(i) { + static_area_size += 8; + } + } // Callee-saved R15 for vmctx. static_area_size += 8; - // Allocate the stack - self.decrease_rsp(a, static_area_size); + // For Windows ABI, save RDI and RSI + if calling_convention == CallingConvention::WindowsFastcall { + static_area_size += 8 * 2; + } + + // Total size of callee saved registers. + let callee_saved_regs_size = static_area_size; - // Save callee-saved registers - for (i, local_reg) in Self::LOCAL_REGISTERS.iter().take(n as usize).enumerate() { - a.emit_mov( - Size::S64, - Location::GPR(*local_reg), - Location::Memory(GPR::RBP, -((i + 1) as i32) * 8), - ); + // Now we can determine concrete locations for locals. 
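Both versions of the locals code agree on the placement policy: the first few locals live in the callee-saved registers R12, R13, R14, RBX, and the rest in 8-byte RBP-relative stack slots below the save area. A sketch of the index-to-location mapping, with a hypothetical `callee_saved_size` standing in for the computed save-area size:

```rust
#[derive(Debug, PartialEq)]
enum Location {
    Gpr(&'static str),
    /// RBP-relative memory operand: `[rbp + offset]` with a negative offset.
    Memory(i32),
}

const LOCAL_REGISTERS: &[&str] = &["r12", "r13", "r14", "rbx"];

/// Map a local index to its home, mirroring `get_local_location`:
/// registers first, then 8-byte stack slots below the save area.
fn local_location(idx: usize, callee_saved_size: usize) -> Location {
    match LOCAL_REGISTERS.get(idx).copied() {
        Some(reg) => Location::Gpr(reg),
        None => {
            let slot = idx - LOCAL_REGISTERS.len();
            Location::Memory(-((slot * 8 + callee_saved_size + 8) as i32))
        }
    }
}

fn main() {
    assert_eq!(local_location(0, 40), Location::Gpr("r12"));
    assert_eq!(local_location(4, 40), Location::Memory(-48));
    assert_eq!(local_location(5, 40), Location::Memory(-56));
}
```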
+ let locations: Vec = (0..n) + .map(|i| get_local_location(i, callee_saved_regs_size)) + .collect(); + + // Add size of locals on stack. + static_area_size += num_mem_slots * 8; + + // Allocate save area, without actually writing to it. + a.emit_sub( + Size::S64, + Location::Imm32(static_area_size as _), + Location::GPR(GPR::RSP), + ); + + // Save callee-saved registers. + for loc in locations.iter() { + if let Location::GPR(_) = *loc { + self.stack_offset.0 += 8; + a.emit_mov( + Size::S64, + *loc, + Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)), + ); + } } // Save R15 for vmctx use. + self.stack_offset.0 += 8; a.emit_mov( Size::S64, Location::GPR(GPR::R15), - Location::Memory(GPR::RSP, 0), + Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)), ); - // For Windows ABI, save RDI and RSI if calling_convention == CallingConvention::WindowsFastcall { - self.decrease_rsp(a, 8 * 2); - for (i, reg) in [GPR::RSI, GPR::RDI].iter().enumerate() { - a.emit_mov( - Size::S64, - Location::GPR(*reg), - Location::Memory(GPR::RSP, i as i32 * 8), - ); - } + // Save RDI + self.stack_offset.0 += 8; + a.emit_mov( + Size::S64, + Location::GPR(GPR::RDI), + Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)), + ); + // Save RSI + self.stack_offset.0 += 8; + a.emit_mov( + Size::S64, + Location::GPR(GPR::RSI), + Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)), + ); } // Save the offset of register save area. self.save_area_offset = Some(MachineStackOffset(self.stack_offset.0)); - // Load in-register parameters into the allocated locations for register parameters. + // Load in-register parameters into the allocated locations. // Locals are allocated on the stack from higher address to lower address, // so we won't skip the stack guard page here. - self.locals_offset = MachineStackOffset(self.stack_offset.0 + 8); // + 8 because locals_offset is supposed to point to 1st local - let params_size = (n_params as usize) - .saturating_sub(Self::LOCAL_REGISTERS.len()) - .checked_mul(8) - .unwrap(); - self.decrease_rsp(a, params_size); for i in 0..n_params { - // NB: the 0th parameter is used for passing around the internal VM data (vmctx). 
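The parameter-loading loop that follows has to cope with x86-64's lack of a memory-to-memory `mov`: a register argument can be stored directly, but an argument the caller already spilled to the stack must be staged through a scratch register (RAX here). A sketch of that case analysis, using illustrative types rather than the real `Location`:

```rust
#[derive(Clone, Copy, Debug)]
enum Loc { Gpr(&'static str), Mem(i32) }

/// Emit the moves needed to copy `src` into `dst`, staging through RAX
/// when both operands are memory, as the parameter-loading loop does.
fn emit_move(src: Loc, dst: Loc, out: &mut Vec<String>) {
    match (src, dst) {
        // At least one side is a register: a single mov suffices.
        (Loc::Gpr(s), Loc::Gpr(d)) => out.push(format!("mov {d}, {s}")),
        (Loc::Gpr(s), Loc::Mem(d)) => out.push(format!("mov [rbp{d:+}], {s}")),
        (Loc::Mem(s), Loc::Gpr(d)) => out.push(format!("mov {d}, [rbp{s:+}]")),
        // mem -> mem does not exist on x86-64: go through a scratch register.
        (Loc::Mem(s), Loc::Mem(d)) => {
            out.push(format!("mov rax, [rbp{s:+}]"));
            out.push(format!("mov [rbp{d:+}], rax"));
        }
    }
}

fn main() {
    let mut out = Vec::new();
    emit_move(Loc::Mem(16), Loc::Mem(-24), &mut out);
    assert_eq!(out, ["mov rax, [rbp+16]", "mov [rbp-24], rax"]);
}
```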
- let loc = Self::get_param_location((i + 1) as usize, calling_convention); - let local_loc = self.get_local_location(i); + let loc = Self::get_param_location(i + 1, calling_convention); match loc { Location::GPR(_) => { - a.emit_mov(Size::S64, loc, local_loc); + a.emit_mov(Size::S64, loc, locations[i]); } - // TODO: move Location::Memory args init into init_locals down below so it happens after instrumentation - // Registers *must* stay here because we’re using registers between setup_registers and init_locals - Location::Memory(_, _) => match local_loc { + Location::Memory(_, _) => match locations[i] { Location::GPR(_) => { - a.emit_mov(Size::S64, loc, local_loc); + a.emit_mov(Size::S64, loc, locations[i]); } Location::Memory(_, _) => { a.emit_mov(Size::S64, loc, Location::GPR(GPR::RAX)); - a.emit_mov(Size::S64, Location::GPR(GPR::RAX), local_loc); + a.emit_mov(Size::S64, Location::GPR(GPR::RAX), locations[i]); } _ => unreachable!(), }, @@ -489,69 +435,54 @@ impl Machine { Self::get_param_location(0, calling_convention), Location::GPR(GPR::R15), ); - } - - pub(crate) fn init_locals( - &mut self, - a: &mut E, - n: u32, - n_params: u32, - _calling_convention: CallingConvention, - ) { - let registers_remaining_for_locals = Self::LOCAL_REGISTERS - .len() - .saturating_sub(n_params as usize); - let locals_to_init = (n - n_params) as usize; - let locals_size = locals_to_init - .saturating_sub(registers_remaining_for_locals) - .checked_mul(8) - .unwrap(); - - // Allocate the stack, without actually writing to it. - self.decrease_rsp(a, locals_size); // Stack probe. // // `rep stosq` writes data from low address to high address and may skip the stack guard page. // so here we probe it explicitly when needed. for i in (n_params..n).step_by(NATIVE_PAGE_SIZE / 8).skip(1) { - a.emit_mov(Size::S64, Location::Imm32(0), self.get_local_location(i)); + a.emit_mov(Size::S64, Location::Imm32(0), locations[i]); } - // Initialize all remaining locals to zero. - // - // This is a little tricky, as we want to initialize all stack local slots, except for - // those that were already populated with function argument data. The complication is in - // the fact that we allocate some registers to the first couple local slots. - // - // First: handle the locals that are allocated to registers... - for local_reg_idx in Self::LOCAL_REGISTERS - .iter() - .skip(n_params as usize) - .take((n_params..n).len()) - { - a.emit_mov(Size::S64, Location::Imm32(0), Location::GPR(*local_reg_idx)); + // Initialize all normal locals to zero. + let mut init_stack_loc_cnt = 0; + let mut last_stack_loc = Location::Memory(GPR::RBP, i32::MAX); + for i in n_params..n { + match locations[i] { + Location::Memory(_, _) => { + init_stack_loc_cnt += 1; + last_stack_loc = cmp::min(last_stack_loc, locations[i]); + } + Location::GPR(_) => { + a.emit_mov(Size::S64, Location::Imm32(0), locations[i]); + } + _ => unreachable!(), + } } - // Second: handle the locals that are allocated to the stack. - let stack_loc_idxs = std::cmp::max(Self::LOCAL_REGISTERS.len() as u32, n_params)..n; - if stack_loc_idxs.len() > 0 { + if init_stack_loc_cnt > 0 { // Since these assemblies take up to 24 bytes, if more than 2 slots are initialized, then they are smaller. 
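The stack-probe loop above touches one local per page before the bulk `rep stosq`, because `rep stosq` writes low-to-high and could otherwise jump straight past the guard page. A sketch of which local indices receive an explicit probe, with the same stride (`NATIVE_PAGE_SIZE / 8` slots) and the same `skip(1)`:

```rust
const NATIVE_PAGE_SIZE: usize = 4096;

/// Local indices that receive an explicit zero-write before `rep stosq`,
/// one per page of locals, mirroring `(n_params..n).step_by(PAGE / 8).skip(1)`.
fn probe_indices(n_params: usize, n: usize) -> Vec<usize> {
    (n_params..n)
        .step_by(NATIVE_PAGE_SIZE / 8) // one 8-byte slot every page
        .skip(1) // the first page is already reached by the saves above it
        .collect()
}

fn main() {
    // A function with 2 params and 1200 locals spans about 3 pages of slots.
    assert_eq!(probe_indices(2, 1200), vec![514, 1026]);
}
```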
a.emit_mov( Size::S64, - Location::Imm64(stack_loc_idxs.len() as u64), + Location::Imm64(init_stack_loc_cnt as u64), Location::GPR(GPR::RCX), ); a.emit_xor(Size::S64, Location::GPR(GPR::RAX), Location::GPR(GPR::RAX)); - a.emit_lea( - Size::S64, - self.get_local_location(n - 1), - Location::GPR(GPR::RDI), - ); + a.emit_lea(Size::S64, last_stack_loc, Location::GPR(GPR::RDI)); a.emit_rep_stosq(); } + + // Add the size of all locals allocated to stack. + self.stack_offset.0 += static_area_size - callee_saved_regs_size; + + locations } - pub(crate) fn finalize_locals(&mut self, a: &mut E) { + pub(crate) fn finalize_locals( + &mut self, + a: &mut E, + locations: &[Location], + calling_convention: CallingConvention, + ) { // Unwind stack to the "save area". a.emit_lea( Size::S64, @@ -561,14 +492,7 @@ impl Machine { ), Location::GPR(GPR::RSP), ); - } - pub(crate) fn restore_registers( - &mut self, - a: &mut E, - calling_convention: CallingConvention, - local_count: u32, - ) { if calling_convention == CallingConvention::WindowsFastcall { // Restore RSI and RDI a.emit_pop(Size::S64, Location::GPR(GPR::RSI)); @@ -577,13 +501,11 @@ impl Machine { // Restore R15 used by vmctx. a.emit_pop(Size::S64, Location::GPR(GPR::R15)); - // Restore callee-saved registers that we used for locals. - for reg in Self::LOCAL_REGISTERS - .iter() - .take(local_count as usize) - .rev() - { - a.emit_pop(Size::S64, Location::GPR(*reg)); + // Restore callee-saved registers. + for loc in locations.iter().rev() { + if let Location::GPR(_) = *loc { + a.emit_pop(Size::S64, *loc); + } } } diff --git a/lib/compiler-singlepass/src/x64_decl.rs b/lib/compiler-singlepass/src/x64_decl.rs index 465a9f2a88..d1727a4f33 100644 --- a/lib/compiler-singlepass/src/x64_decl.rs +++ b/lib/compiler-singlepass/src/x64_decl.rs @@ -3,8 +3,8 @@ use wasmer_compiler::CallingConvention; use wasmer_types::Type; /// General-purpose registers. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, strum::FromRepr)] #[repr(u8)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub(crate) enum GPR { /// RAX register RAX, @@ -40,15 +40,10 @@ pub(crate) enum GPR { R15, } -impl GPR { - pub const fn num_gprs() -> usize { - GPR::R15 as usize + 1 - } -} - /// XMM registers. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, strum::FromRepr)] #[repr(u8)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[allow(dead_code)] pub(crate) enum XMM { /// XMM register 0 XMM0, @@ -84,12 +79,6 @@ pub(crate) enum XMM { XMM15, } -impl XMM { - pub const fn num_xmms() -> usize { - XMM::XMM15 as usize + 1 - } -} - /// A machine register under the x86-64 architecture. 
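The x64_decl.rs hunk above removes the `strum::FromRepr` derive and the `num_gprs`/`num_xmms` constants that backed the bitset encoding. The same facility is a few lines of std-only code; a sketch with an abbreviated, illustrative register set:

```rust
/// Register enum with a stable `u8` discriminant, so a register can be
/// used directly as a bit index (as the bitset code did).
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Gpr { Rax, Rcx, Rdx, Rbx }

impl Gpr {
    /// Number of registers, derived from the last discriminant.
    const fn num_gprs() -> usize { Gpr::Rbx as usize + 1 }

    /// Inverse of `as u8`: recover a register from its discriminant.
    fn from_repr(r: u8) -> Option<Gpr> {
        const ALL: [Gpr; Gpr::num_gprs()] = [Gpr::Rax, Gpr::Rcx, Gpr::Rdx, Gpr::Rbx];
        ALL.get(r as usize).copied()
    }
}

fn main() {
    assert_eq!(Gpr::num_gprs(), 4);
    assert_eq!(Gpr::from_repr(2), Some(Gpr::Rdx));
    assert_eq!(Gpr::from_repr(9), None);
}
```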
#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub(crate) enum X64Register { diff --git a/lib/compiler/Cargo.toml b/lib/compiler/Cargo.toml index 48dae8e1bc..eafd4bf6f0 100644 --- a/lib/compiler/Cargo.toml +++ b/lib/compiler/Cargo.toml @@ -14,17 +14,15 @@ edition = "2018" name = "wasmer_compiler" [dependencies] -finite-wasm = "0.3.0" wasmer-vm = { path = "../vm", package = "wasmer-vm-near", version = "=2.4.0" } wasmer-types = { path = "../types", package = "wasmer-types-near", version = "=2.4.0", default-features = false } -wasmparser = { version = "0.99.0", optional = true, default-features = false } +wasmparser = { version = "0.78", optional = true, default-features = false } target-lexicon = { version = "0.12.2", default-features = false } enumset = "1.0" hashbrown = { version = "0.11", optional = true } thiserror = "1.0" smallvec = "1.6" rkyv = { version = "0.7.20" } -tracing = "0.1" [features] default = ["std" ] diff --git a/lib/compiler/README.md b/lib/compiler/README.md index 093c6afa62..ace37ae93b 100644 --- a/lib/compiler/README.md +++ b/lib/compiler/README.md @@ -9,6 +9,8 @@ compiler implementor. Here are some of the Compilers provided by Wasmer: * [Singlepass](https://github.com/wasmerio/wasmer/tree/master/lib/compiler-singlepass), +* [Cranelift](https://github.com/wasmerio/wasmer/tree/master/lib/compiler-cranelift), +* [LLVM](https://github.com/wasmerio/wasmer/tree/master/lib/compiler-llvm). ## How to create a compiler @@ -36,7 +38,6 @@ pub trait Compiler { module_translation: &ModuleTranslationState, // The list of function bodies function_body_inputs: PrimaryMap>, - instrumentation: &finite_wasm::Module, ) -> Result; } ``` diff --git a/lib/compiler/src/compiler.rs b/lib/compiler/src/compiler.rs index 2cd88f5ec0..3bd6a49c32 100644 --- a/lib/compiler/src/compiler.rs +++ b/lib/compiler/src/compiler.rs @@ -81,6 +81,7 @@ pub trait Compiler: Send { features: &Features, data: &'data [u8], ) -> Result<(), CompileError> { + let mut validator = Validator::new(); let wasm_features = WasmFeatures { bulk_memory: features.bulk_memory, threads: features.threads, @@ -88,19 +89,13 @@ pub trait Compiler: Send { multi_value: features.multi_value, simd: features.simd, tail_call: features.tail_call, + module_linking: features.module_linking, multi_memory: features.multi_memory, memory64: features.memory64, exceptions: features.exceptions, - floats: true, - component_model: false, - extended_const: false, - mutable_global: features.mutable_global, - relaxed_simd: false, - saturating_float_to_int: features.saturating_float_to_int, - sign_extension: features.sign_extension, - memory_control: false, + deterministic_only: false, }; - let mut validator = Validator::new_with_features(wasm_features); + validator.wasm_features(wasm_features); validator .validate_all(data) .map_err(|e| CompileError::Validate(format!("{}", e)))?; @@ -117,8 +112,6 @@ pub trait Compiler: Send { module_translation: &ModuleTranslationState, // The list of function bodies function_body_inputs: PrimaryMap>, - tunables: &dyn wasmer_vm::Tunables, - instrumentation: &finite_wasm::AnalysisOutcome, ) -> Result; /// Compiles a module into a native object file. diff --git a/lib/compiler/src/error.rs b/lib/compiler/src/error.rs index 6ecee73588..2423d79fe0 100644 --- a/lib/compiler/src/error.rs +++ b/lib/compiler/src/error.rs @@ -28,10 +28,6 @@ pub enum CompileError { #[cfg_attr(feature = "std", error("Validation error: {0}"))] Validate(String), - /// Finite-wasm failed to handle the module. 
- #[cfg_attr(feature = "std", error("Finite-wasm analysis error: {0}"))] - Analyze(finite_wasm::Error), - /// The compiler doesn't support a Wasm feature #[cfg_attr(feature = "std", error("Feature {0} is not yet supported"))] UnsupportedFeature(String), diff --git a/lib/compiler/src/function.rs b/lib/compiler/src/function.rs index 18b05a6c65..22633062f0 100644 --- a/lib/compiler/src/function.rs +++ b/lib/compiler/src/function.rs @@ -141,12 +141,12 @@ impl TrampolinesSection { #[derive(Debug, PartialEq, Eq)] pub struct Compilation { /// Compiled code for the function bodies. - pub functions: Functions, + functions: Functions, /// Custom sections for the module. /// It will hold the data, for example, for constants used in a /// function, global variables, rodata_64, hot/cold function partitioning, ... - pub custom_sections: CustomSections, + custom_sections: CustomSections, /// Trampolines to call a function defined locally in the wasm via a /// provided `Vec` of values. @@ -157,7 +157,7 @@ pub struct Compilation { /// let func = instance.exports.get_function("my_func"); /// func.call(&[Value::I32(1)]); /// ``` - pub function_call_trampolines: PrimaryMap, + function_call_trampolines: PrimaryMap, /// Trampolines to call a dynamic function defined in /// a host, from a Wasm module. @@ -178,11 +178,135 @@ pub struct Compilation { /// ``` /// /// Note: Dynamic function trampolines are only compiled for imported function types. - pub dynamic_function_trampolines: PrimaryMap, + dynamic_function_trampolines: PrimaryMap, /// Section ids corresponding to the Dwarf debug info - pub debug: Option, + debug: Option, /// Trampolines for the arch that needs it - pub trampolines: Option, + trampolines: Option, +} + +impl Compilation { + /// Creates a compilation artifact from a contiguous function buffer and a set of ranges + pub fn new( + functions: Functions, + custom_sections: CustomSections, + function_call_trampolines: PrimaryMap, + dynamic_function_trampolines: PrimaryMap, + debug: Option, + trampolines: Option, + ) -> Self { + Self { + functions, + custom_sections, + function_call_trampolines, + dynamic_function_trampolines, + debug, + trampolines, + } + } + + /// Gets the bytes of a single function + pub fn get(&self, func: LocalFunctionIndex) -> &CompiledFunction { + &self.functions[func] + } + + /// Gets the number of functions defined. + pub fn len(&self) -> usize { + self.functions.len() + } + + /// Returns whether there are no functions defined. + pub fn is_empty(&self) -> bool { + self.functions.is_empty() + } + + /// Gets functions relocations. + pub fn get_relocations(&self) -> PrimaryMap> { + self.functions + .iter() + .map(|(_, func)| func.relocations.clone()) + .collect::>() + } + + /// Gets functions bodies. + pub fn get_function_bodies(&self) -> PrimaryMap { + self.functions + .iter() + .map(|(_, func)| func.body.clone()) + .collect::>() + } + + /// Gets functions jump table offsets. + pub fn get_jt_offsets(&self) -> PrimaryMap { + self.functions + .iter() + .map(|(_, func)| func.jt_offsets.clone()) + .collect::>() + } + + /// Gets functions frame info. + pub fn get_frame_info(&self) -> PrimaryMap { + self.functions + .iter() + .map(|(_, func)| func.frame_info.clone()) + .collect::>() + } + + /// Gets function call trampolines. + pub fn get_function_call_trampolines(&self) -> PrimaryMap { + self.function_call_trampolines.clone() + } + + /// Gets function call trampolines. 
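The restored `Compilation` API hides its maps behind accessor methods plus an `IntoIterator` impl for `&Compilation` (the impl continues just below). A reduced sketch of that pattern with placeholder types:

```rust
/// A container that hides its storage but stays iterable by reference,
/// in the style of `impl<'a> IntoIterator for &'a Compilation`.
struct Compiled { functions: Vec<String> }

impl Compiled {
    fn new(functions: Vec<String>) -> Self { Self { functions } }
    fn len(&self) -> usize { self.functions.len() }
}

impl<'a> IntoIterator for &'a Compiled {
    type Item = &'a String;
    type IntoIter = std::slice::Iter<'a, String>;
    fn into_iter(self) -> Self::IntoIter {
        self.functions.iter()
    }
}

fn main() {
    let c = Compiled::new(vec!["f0".into(), "f1".into()]);
    assert_eq!(c.len(), 2);
    // `&c` iterates without exposing the backing Vec.
    for (i, body) in (&c).into_iter().enumerate() {
        println!("function {i}: {body}");
    }
}
```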
+ pub fn get_dynamic_function_trampolines(&self) -> PrimaryMap { + self.dynamic_function_trampolines.clone() + } + + /// Gets custom section data. + pub fn get_custom_sections(&self) -> PrimaryMap { + self.custom_sections.clone() + } + + /// Gets relocations that apply to custom sections. + pub fn get_custom_section_relocations(&self) -> PrimaryMap> { + self.custom_sections + .iter() + .map(|(_, section)| section.relocations.clone()) + .collect::>() + } + + /// Returns the Dwarf info. + pub fn get_debug(&self) -> Option { + self.debug.clone() + } + + /// Returns the Trampolines info. + pub fn get_trampolines(&self) -> Option { + self.trampolines.clone() + } +} + +impl<'a> IntoIterator for &'a Compilation { + type IntoIter = Iter<'a>; + type Item = ::Item; + + fn into_iter(self) -> Self::IntoIter { + Iter { + iterator: self.functions.iter(), + } + } +} + +pub struct Iter<'a> { + iterator: <&'a Functions as IntoIterator>::IntoIter, +} + +impl<'a> Iterator for Iter<'a> { + type Item = &'a CompiledFunction; + + fn next(&mut self) -> Option { + self.iterator.next().map(|(_, b)| b) + } } diff --git a/lib/compiler/src/translator/environ.rs b/lib/compiler/src/translator/environ.rs index 529b800776..1fc214021a 100644 --- a/lib/compiler/src/translator/environ.rs +++ b/lib/compiler/src/translator/environ.rs @@ -59,7 +59,6 @@ impl<'data> ModuleEnvironment<'data> { /// Translate a wasm module using this environment. This consumes the /// `ModuleEnvironment` and produces a `ModuleInfoTranslation`. - #[tracing::instrument(skip_all)] pub fn translate(mut self, data: &'data [u8]) -> WasmResult> { assert!(self.module_translation_state.is_none()); let module_translation_state = translate_module(data, &mut self)?; diff --git a/lib/compiler/src/translator/error.rs b/lib/compiler/src/translator/error.rs index e61e88c388..07a6397d55 100644 --- a/lib/compiler/src/translator/error.rs +++ b/lib/compiler/src/translator/error.rs @@ -36,7 +36,7 @@ mod tests { let binary_reader_error = reader.read_bytes(10).unwrap_err(); match WasmError::from(binary_reader_error) { WasmError::InvalidWebAssembly { message, offset } => { - assert_eq!(message, "unexpected end-of-file"); + assert_eq!(message, "Unexpected EOF"); assert_eq!(offset, 0); } err => panic!("Unexpected error: {:?}", err), @@ -49,7 +49,7 @@ mod tests { let binary_reader_error = reader.read_bytes(10).unwrap_err(); match CompileError::from(binary_reader_error) { CompileError::Wasm(WasmError::InvalidWebAssembly { message, offset }) => { - assert_eq!(message, "unexpected end-of-file"); + assert_eq!(message, "Unexpected EOF"); assert_eq!(offset, 0); } err => panic!("Unexpected error: {:?}", err), diff --git a/lib/compiler/src/translator/module.rs b/lib/compiler/src/translator/module.rs index 84ba796838..823d77d700 100644 --- a/lib/compiler/src/translator/module.rs +++ b/lib/compiler/src/translator/module.rs @@ -15,7 +15,6 @@ use wasmparser::{NameSectionReader, Parser, Payload}; /// Translate a sequence of bytes forming a valid Wasm binary into a /// parsed ModuleInfo `ModuleTranslationState`. -#[tracing::instrument(skip_all)] pub fn translate_module<'data>( data: &'data [u8], environ: &mut ModuleEnvironment<'data>, @@ -24,7 +23,7 @@ pub fn translate_module<'data>( for payload in Parser::new(0).parse_all(data) { match payload? { - Payload::Version { .. } | Payload::End(_) => {} + Payload::Version { .. 
} | Payload::End => {} Payload::TypeSection(types) => { parse_type_section(types, &mut module_translation_state, environ)?; @@ -82,53 +81,22 @@ pub fn translate_module<'data>( environ.reserve_passive_data(count)?; } - Payload::InstanceSection(_) => { + Payload::InstanceSection(_) + | Payload::AliasSection(_) + | Payload::EventSection(_) + | Payload::ModuleSectionStart { .. } + | Payload::ModuleSectionEntry { .. } => { unimplemented!("module linking not implemented yet") } - Payload::TagSection(_) => { - unimplemented!("exception handling proposal is not implemented yet") - } - - Payload::CustomSection(reader) => { - if reader.name() == "name" { - parse_name_section( - NameSectionReader::new(reader.data(), reader.data_offset()), - environ, - )?; - } else { - environ.custom_section(reader.name(), reader.data())?; - } - } + Payload::CustomSection { + name: "name", + data, + data_offset, + .. + } => parse_name_section(NameSectionReader::new(data, data_offset)?, environ)?, - Payload::ModuleSection { .. } => unimplemented!("module sections not supported yet"), // which proposal is this coming from? - Payload::CoreTypeSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentInstanceSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentAliasSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentTypeSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentCanonicalSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentStartSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentImportSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } - Payload::ComponentExportSection { .. } => { - unimplemented!("component proposal is not implemented yet") - } + Payload::CustomSection { name, data, .. } => environ.custom_section(name, data)?, Payload::UnknownSection { .. 
} => unreachable!(), } diff --git a/lib/compiler/src/translator/sections.rs b/lib/compiler/src/translator/sections.rs index c0324b1a4c..9759c2d510 100644 --- a/lib/compiler/src/translator/sections.rs +++ b/lib/compiler/src/translator/sections.rs @@ -17,8 +17,6 @@ use crate::{WasmError, WasmResult}; use core::convert::TryFrom; use std::boxed::Box; use std::collections::HashMap; -use std::convert::TryInto; -use std::sync::Arc; use std::vec::Vec; use wasmer_types::entity::packed_option::ReservedValue; use wasmer_types::entity::EntityRef; @@ -27,23 +25,27 @@ use wasmer_types::{ MemoryIndex, MemoryType, Mutability, Pages, SignatureIndex, TableIndex, TableType, Type, V128, }; use wasmparser::{ - self, Data, DataKind, DataSectionReader, Element, ElementItems, ElementKind, - ElementSectionReader, Export, ExportSectionReader, ExternalKind, FunctionSectionReader, - GlobalSectionReader, GlobalType as WPGlobalType, ImportSectionReader, MemorySectionReader, - NameMap, NameSectionReader, Naming, Operator, TableSectionReader, Type as WPType, TypeRef, - TypeSectionReader, ValType as WPValType, + self, Data, DataKind, DataSectionReader, Element, ElementItem, ElementItems, ElementKind, + ElementSectionReader, Export, ExportSectionReader, ExternalKind, FuncType as WPFunctionType, + FunctionSectionReader, GlobalSectionReader, GlobalType as WPGlobalType, ImportSectionEntryType, + ImportSectionReader, MemorySectionReader, MemoryType as WPMemoryType, NameSectionReader, + Naming, NamingReader, Operator, TableSectionReader, TypeDef, TypeSectionReader, }; /// Helper function translating wasmparser types to Wasm Type. -pub fn wptype_to_type(ty: WPValType) -> WasmResult { +pub fn wptype_to_type(ty: wasmparser::Type) -> WasmResult { match ty { - WPValType::I32 => Ok(Type::I32), - WPValType::I64 => Ok(Type::I64), - WPValType::F32 => Ok(Type::F32), - WPValType::F64 => Ok(Type::F64), - WPValType::V128 => Ok(Type::V128), - WPValType::ExternRef => Ok(Type::ExternRef), - WPValType::FuncRef => Ok(Type::FuncRef), + wasmparser::Type::I32 => Ok(Type::I32), + wasmparser::Type::I64 => Ok(Type::I64), + wasmparser::Type::F32 => Ok(Type::F32), + wasmparser::Type::F64 => Ok(Type::F64), + wasmparser::Type::V128 => Ok(Type::V128), + wasmparser::Type::ExternRef => Ok(Type::ExternRef), + wasmparser::Type::FuncRef => Ok(Type::FuncRef), + ty => Err(wasm_unsupported!( + "wptype_to_type: wasmparser type {:?}", + ty + )), } } @@ -53,30 +55,28 @@ pub fn parse_type_section( module_translation_state: &mut ModuleTranslationState, environ: &mut ModuleEnvironment, ) -> WasmResult<()> { - let count = types.count(); + let count = types.get_count(); environ.reserve_signatures(count)?; for entry in types { - if let Ok(WPType::Func(t)) = entry { - let params: Box<[WPValType]> = t.params().into(); - let results: Box<[WPValType]> = t.results().into(); - let sig_params: Arc<[Type]> = params + if let Ok(TypeDef::Func(WPFunctionType { params, returns })) = entry { + let sig_params: Vec = params .iter() .map(|ty| { wptype_to_type(*ty) .expect("only numeric types are supported in function signatures") }) .collect(); - let sig_results: Arc<[Type]> = results + let sig_returns: Vec = returns .iter() .map(|ty| { wptype_to_type(*ty) .expect("only numeric types are supported in function signatures") }) .collect(); - let sig = FunctionType::new(sig_params, sig_results); + let sig = FunctionType::new(sig_params, sig_returns); environ.declare_signature(sig)?; - module_translation_state.wasm_types.push((params, results)); + 
module_translation_state.wasm_types.push((params, returns)); } else { unimplemented!("module linking not implemented yet") } @@ -90,34 +90,44 @@ pub fn parse_import_section<'data>( imports: ImportSectionReader<'data>, environ: &mut ModuleEnvironment<'data>, ) -> WasmResult<()> { - environ.reserve_imports(imports.count())?; + environ.reserve_imports(imports.get_count())?; for entry in imports { let import = entry?; let module_name = import.module; - let field_name = import.name; + let field_name = import.field; match import.ty { - TypeRef::Func(sig) => { + ImportSectionEntryType::Function(sig) => { environ.declare_func_import( SignatureIndex::from_u32(sig), module_name, - field_name, + field_name.unwrap_or_default(), )?; } - TypeRef::Memory(mem) => { - assert!(!mem.memory64, "64bit memory not implemented yet"); + ImportSectionEntryType::Module(_) + | ImportSectionEntryType::Instance(_) + | ImportSectionEntryType::Event(_) => { + unimplemented!("module linking not implemented yet") + } + ImportSectionEntryType::Memory(WPMemoryType::M32 { + limits: ref memlimits, + shared, + }) => { environ.declare_memory_import( MemoryType { - minimum: Pages(mem.initial.try_into().unwrap()), - maximum: mem.maximum.map(|m| Pages(m.try_into().unwrap())), - shared: mem.shared, + minimum: Pages(memlimits.initial), + maximum: memlimits.maximum.map(Pages), + shared, }, module_name, - field_name, + field_name.unwrap_or_default(), )?; } - TypeRef::Global(ref ty) => { + ImportSectionEntryType::Memory(WPMemoryType::M64 { .. }) => { + unimplemented!("64bit memory not implemented yet") + } + ImportSectionEntryType::Global(ref ty) => { environ.declare_global_import( GlobalType { ty: wptype_to_type(ty.content_type).unwrap(), @@ -128,21 +138,20 @@ pub fn parse_import_section<'data>( }, }, module_name, - field_name, + field_name.unwrap_or_default(), )?; } - TypeRef::Table(ref tab) => { + ImportSectionEntryType::Table(ref tab) => { environ.declare_table_import( TableType { ty: wptype_to_type(tab.element_type).unwrap(), - minimum: tab.initial, - maximum: tab.maximum, + minimum: tab.limits.initial, + maximum: tab.limits.maximum, }, module_name, - field_name, + field_name.unwrap_or_default(), )?; } - TypeRef::Tag(_) => panic!("exception handling proposal is not implemented yet"), } } @@ -155,7 +164,7 @@ pub fn parse_function_section( functions: FunctionSectionReader, environ: &mut ModuleEnvironment, ) -> WasmResult<()> { - let num_functions = functions.count(); + let num_functions = functions.get_count(); if num_functions == std::u32::MAX { // We reserve `u32::MAX` for our own use. 
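Several section parsers below bail out with `ImplLimitExceeded` when an index equals `u32::MAX`, because that value is reserved as an in-band niche: `read_elems` maps a null element item to `FunctionIndex::reserved_value()`. A sketch of a reserved-value newtype in that style (hypothetical, loosely modeled on wasmer's entity indices):

```rust
/// Index newtype that reserves u32::MAX as an in-band "null".
#[derive(Clone, Copy, Debug, PartialEq)]
struct FuncIndex(u32);

impl FuncIndex {
    const fn reserved_value() -> FuncIndex { FuncIndex(u32::MAX) }

    /// Reject the reserved encoding at construction, like the
    /// `index == std::u32::MAX` checks in the section parsers.
    fn new(index: u32) -> Result<FuncIndex, &'static str> {
        if index == u32::MAX {
            Err("implementation limit exceeded: u32::MAX is reserved")
        } else {
            Ok(FuncIndex(index))
        }
    }

    fn is_reserved(self) -> bool { self == Self::reserved_value() }
}

fn main() {
    assert!(FuncIndex::new(42).is_ok());
    assert!(FuncIndex::new(u32::MAX).is_err());
    // A null element item maps to the reserved value, as in `read_elems`.
    assert!(FuncIndex::reserved_value().is_reserved());
}
```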
return Err(WasmError::ImplLimitExceeded); @@ -176,14 +185,14 @@ pub fn parse_table_section( tables: TableSectionReader, environ: &mut ModuleEnvironment, ) -> WasmResult<()> { - environ.reserve_tables(tables.count())?; + environ.reserve_tables(tables.get_count())?; for entry in tables { let table = entry?; environ.declare_table(TableType { ty: wptype_to_type(table.element_type).unwrap(), - minimum: table.initial, - maximum: table.maximum, + minimum: table.limits.initial, + maximum: table.limits.maximum, })?; } @@ -195,17 +204,20 @@ pub fn parse_memory_section( memories: MemorySectionReader, environ: &mut ModuleEnvironment, ) -> WasmResult<()> { - environ.reserve_memories(memories.count())?; + environ.reserve_memories(memories.get_count())?; for entry in memories { - let mem = entry?; - assert!(!mem.memory64, "64bit memory not implemented yet"); - - environ.declare_memory(MemoryType { - minimum: Pages(mem.initial.try_into().unwrap()), - maximum: mem.maximum.map(|m| Pages(m.try_into().unwrap())), - shared: mem.shared, - })?; + let memory = entry?; + match memory { + WPMemoryType::M32 { limits, shared } => { + environ.declare_memory(MemoryType { + minimum: Pages(limits.initial), + maximum: limits.maximum.map(Pages), + shared, + })?; + } + WPMemoryType::M64 { .. } => unimplemented!("64bit memory not implemented yet"), + } } Ok(()) @@ -216,7 +228,7 @@ pub fn parse_global_section( globals: GlobalSectionReader, environ: &mut ModuleEnvironment, ) -> WasmResult<()> { - environ.reserve_globals(globals.count())?; + environ.reserve_globals(globals.get_count())?; for entry in globals { let wasmparser::Global { @@ -266,11 +278,11 @@ pub fn parse_export_section<'data>( exports: ExportSectionReader<'data>, environ: &mut ModuleEnvironment<'data>, ) -> WasmResult<()> { - environ.reserve_exports(exports.count())?; + environ.reserve_exports(exports.get_count())?; for entry in exports { let Export { - name, + field, ref kind, index, } = entry?; @@ -280,11 +292,22 @@ pub fn parse_export_section<'data>( // becomes a concern here. let index = index as usize; match *kind { - ExternalKind::Func => environ.declare_func_export(FunctionIndex::new(index), name)?, - ExternalKind::Table => environ.declare_table_export(TableIndex::new(index), name)?, - ExternalKind::Memory => environ.declare_memory_export(MemoryIndex::new(index), name)?, - ExternalKind::Global => environ.declare_global_export(GlobalIndex::new(index), name)?, - ExternalKind::Tag => panic!("exception handling proposal is not implemented yet"), + ExternalKind::Function => { + environ.declare_func_export(FunctionIndex::new(index), field)? + } + ExternalKind::Table => environ.declare_table_export(TableIndex::new(index), field)?, + ExternalKind::Memory => { + environ.declare_memory_export(MemoryIndex::new(index), field)? + } + ExternalKind::Global => { + environ.declare_global_export(GlobalIndex::new(index), field)? 
+ } + ExternalKind::Type + | ExternalKind::Module + | ExternalKind::Instance + | ExternalKind::Event => { + unimplemented!("module linking not implemented yet") + } } } @@ -299,30 +322,16 @@ pub fn parse_start_section(index: u32, environ: &mut ModuleEnvironment) -> WasmR } fn read_elems(items: &ElementItems) -> WasmResult<Box<[FunctionIndex]>> { - match items.clone() { - ElementItems::Functions(items) => items - .into_iter() - .map(|v| v.map(FunctionIndex::from_u32).map_err(WasmError::from)) - .collect(), - ElementItems::Expressions(items) => { - let mut elems = Vec::with_capacity(usize::try_from(items.count()).unwrap()); - for item in items.into_iter() { - let mut reader = item?.get_operators_reader(); - let op = reader.read()?; - let end = reader.read()?; - reader.ensure_end()?; - use Operator::*; - match (op, end) { - (RefFunc { function_index }, End) => { - elems.push(FunctionIndex::from_u32(function_index)) - } - (RefNull { .. }, End) => elems.push(FunctionIndex::reserved_value()), - _ => todo!("unexpected syntax for elems item initializer"), - } - } - Ok(elems.into_boxed_slice()) - } + let items_reader = items.get_items_reader()?; + let mut elems = Vec::with_capacity(usize::try_from(items_reader.get_count()).unwrap()); + for item in items_reader { + let elem = match item? { + ElementItem::Null(_ty) => FunctionIndex::reserved_value(), + ElementItem::Func(index) => FunctionIndex::from_u32(index), + }; + elems.push(elem); } + Ok(elems.into_boxed_slice()) } /// Parses the Element section of the wasm module. @@ -330,13 +339,11 @@ pub fn parse_element_section<'data>( elements: ElementSectionReader<'data>, environ: &mut ModuleEnvironment, ) -> WasmResult<()> { - environ.reserve_table_initializers(elements.count())?; + environ.reserve_table_initializers(elements.get_count())?; for (index, entry) in elements.into_iter().enumerate() { - let Element { - kind, items, ty, .. - } = entry?; - if ty != WPValType::FuncRef { + let Element { kind, items, ty } = entry?; + if ty != wasmparser::Type::FuncRef { return Err(wasm_unsupported!( "unsupported table element type: {:?}", ty @@ -346,10 +353,10 @@ pub fn parse_element_section<'data>( match kind { ElementKind::Active { table_index, - offset_expr, + init_expr, } => { - let mut offset_expr_reader = offset_expr.get_binary_reader(); - let (base, offset) = match offset_expr_reader.read_operator()? { + let mut init_expr_reader = init_expr.get_binary_reader(); + let (base, offset) = match init_expr_reader.read_operator()? { Operator::I32Const { value } => (None, value as u32 as usize), Operator::GlobalGet { global_index } => { (Some(GlobalIndex::from_u32(global_index)), 0) @@ -383,17 +390,17 @@ pub fn parse_data_section<'data>( data: DataSectionReader<'data>, environ: &mut ModuleEnvironment<'data>, ) -> WasmResult<()> { - environ.reserve_data_initializers(data.count())?; + environ.reserve_data_initializers(data.get_count())?; for (index, entry) in data.into_iter().enumerate() { - let Data { kind, data, .. } = entry?; + let Data { kind, data } = entry?; match kind { DataKind::Active { memory_index, - offset_expr, + init_expr, } => { - let mut offset_expr_reader = offset_expr.get_binary_reader(); - let (base, offset) = match offset_expr_reader.read_operator()? { + let mut init_expr_reader = init_expr.get_binary_reader(); + let (base, offset) = match init_expr_reader.read_operator()?
{ Operator::I32Const { value } => (None, value as u32 as usize), Operator::GlobalGet { global_index } => { (Some(GlobalIndex::from_u32(global_index)), 0) @@ -427,40 +434,37 @@ pub fn parse_name_section<'data>( mut names: NameSectionReader<'data>, environ: &mut ModuleEnvironment<'data>, ) -> WasmResult<()> { - use wasmparser::Name; - while let Some(subsection) = names.next() { - let subsection = subsection?; + while let Ok(subsection) = names.read() { match subsection { - Name::Function(function_subsection) => { - if let Some(function_names) = parse_function_name_subsection(function_subsection) { + wasmparser::Name::Function(function_subsection) => { + if let Some(function_names) = function_subsection + .get_map() + .ok() + .and_then(parse_function_name_subsection) + { for (index, name) in function_names { environ.declare_function_name(index, name)?; } } } - Name::Module { name, .. } => { - environ.declare_module_name(name)?; + wasmparser::Name::Module(module) => { + if let Ok(name) = module.get_name() { + environ.declare_module_name(name)?; + } } - Name::Local(_) => {} - Name::Label(_) => {} - Name::Type(_) => {} - Name::Table(_) => {} - Name::Memory(_) => {} - Name::Global(_) => {} - Name::Element(_) => {} - Name::Data(_) => {} - Name::Unknown { .. } => {} + wasmparser::Name::Local(_) => {} + wasmparser::Name::Unknown { .. } => {} }; } Ok(()) } fn parse_function_name_subsection( - naming_reader: NameMap<'_>, + mut naming_reader: NamingReader<'_>, ) -> Option<HashMap<FunctionIndex, &str>> { let mut function_names = HashMap::new(); - for name in naming_reader.into_iter() { - let Naming { index, name } = name.ok()?; + for _ in 0..naming_reader.get_count() { + let Naming { index, name } = naming_reader.read().ok()?; if index == std::u32::MAX { // We reserve `u32::MAX` for our own use. return None; diff --git a/lib/compiler/src/translator/state.rs b/lib/compiler/src/translator/state.rs index 032627e6c1..c8a2ff4bb7 100644 --- a/lib/compiler/src/translator/state.rs +++ b/lib/compiler/src/translator/state.rs @@ -1,6 +1,7 @@ // This file contains code from external sources. // Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md +use crate::{wasm_unsupported, WasmResult}; use std::boxed::Box; use std::collections::HashMap; use wasmer_types::entity::PrimaryMap; @@ -8,7 +9,7 @@ use wasmer_types::{FunctionIndex, ImportIndex, ModuleInfo, SignatureIndex}; /// Map of signatures to a function's parameter and return types. pub(crate) type WasmTypes = - PrimaryMap<SignatureIndex, (Box<[wasmparser::ValType]>, Box<[wasmparser::ValType]>)>; + PrimaryMap<SignatureIndex, (Box<[wasmparser::Type]>, Box<[wasmparser::Type]>)>; /// Contains information decoded from the Wasm module that must be referenced /// during each Wasm function's translation. @@ -38,7 +39,6 @@ impl ModuleTranslationState { } /// Build map of imported functions names for intrinsification. - #[tracing::instrument(skip_all)] pub fn build_import_map(&mut self, module: &ModuleInfo) { for key in module.imports.keys() { let value = &module.imports[key]; @@ -52,4 +52,29 @@ impl ModuleTranslationState { } } } + + /// Get the parameter and result types for the given Wasm blocktype.
+ pub fn blocktype_params_results( + &self, + ty_or_ft: wasmparser::TypeOrFuncType, + ) -> WasmResult<(&[wasmparser::Type], &[wasmparser::Type])> { + Ok(match ty_or_ft { + wasmparser::TypeOrFuncType::Type(ty) => match ty { + wasmparser::Type::I32 => (&[], &[wasmparser::Type::I32]), + wasmparser::Type::I64 => (&[], &[wasmparser::Type::I64]), + wasmparser::Type::F32 => (&[], &[wasmparser::Type::F32]), + wasmparser::Type::F64 => (&[], &[wasmparser::Type::F64]), + wasmparser::Type::V128 => (&[], &[wasmparser::Type::V128]), + wasmparser::Type::ExternRef => (&[], &[wasmparser::Type::ExternRef]), + wasmparser::Type::FuncRef => (&[], &[wasmparser::Type::FuncRef]), + wasmparser::Type::EmptyBlockType => (&[], &[]), + ty => return Err(wasm_unsupported!("blocktype_params_results: type {:?}", ty)), + }, + wasmparser::TypeOrFuncType::FuncType(ty_index) => { + let sig_idx = SignatureIndex::from_u32(ty_index); + let (ref params, ref results) = self.wasm_types[sig_idx]; + (&*params, &*results) + } + }) + } } diff --git a/lib/derive/Cargo.toml b/lib/derive/Cargo.toml new file mode 100644 index 0000000000..fcf6375bb9 --- /dev/null +++ b/lib/derive/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "wasmer-derive-near" +version = "2.4.0" +description = "Wasmer derive macros" +authors = ["Wasmer Engineering Team <engineering@wasmer.io>"] +repository = "https://github.com/wasmerio/wasmer" +license = "MIT" +edition = "2018" + +[lib] +proc-macro = true +name = "wasmer_derive" + +[dependencies] +syn = { version = "1.0.72", features = ["full", "extra-traits"] } +quote = "1" +proc-macro2 = "1" +proc-macro-error = "1.0.0" + +[dev-dependencies] +wasmer = { path = "../api", version = "=2.4.0", package = "wasmer-near" } +compiletest_rs = "0.6" diff --git a/lib/derive/src/lib.rs b/lib/derive/src/lib.rs new file mode 100644 index 0000000000..445315df12 --- /dev/null +++ b/lib/derive/src/lib.rs @@ -0,0 +1,263 @@ +extern crate proc_macro; + +use proc_macro2::TokenStream; +use proc_macro_error::{abort, proc_macro_error, set_dummy}; +use quote::{quote, quote_spanned, ToTokens}; +use syn::{spanned::Spanned, *}; + +mod parse; + +use crate::parse::WasmerAttr; + +#[proc_macro_error] +#[proc_macro_derive(WasmerEnv, attributes(wasmer))] +pub fn derive_wasmer_env(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input: DeriveInput = syn::parse(input).unwrap(); + let gen = impl_wasmer_env(&input); + gen.into() +} + +fn impl_wasmer_env_for_struct( + name: &Ident, + data: &DataStruct, + generics: &Generics, + _attrs: &[Attribute], +) -> TokenStream { + let (trait_methods, helper_methods) = derive_struct_fields(data); + let lifetimes_and_generics = generics.params.clone(); + let where_clause = generics.where_clause.clone(); + quote! { + impl < #lifetimes_and_generics > ::wasmer::WasmerEnv for #name < #lifetimes_and_generics > #where_clause{ + #trait_methods + } + + #[allow(dead_code)] + impl < #lifetimes_and_generics > #name < #lifetimes_and_generics > #where_clause { + #helper_methods + } + } +} + +fn impl_wasmer_env(input: &DeriveInput) -> TokenStream { + let struct_name = &input.ident; + + set_dummy(quote! { + impl ::wasmer::WasmerEnv for #struct_name { + fn init_with_instance(&mut self, instance: &::wasmer::Instance) -> ::core::result::Result<(), ::wasmer::HostEnvInitError> { + Ok(()) + } + } + }); + + match &input.data { + Data::Struct(ds) => { + impl_wasmer_env_for_struct(struct_name, ds, &input.generics, &input.attrs) + } + _ => todo!(), + } + /*match input.data { + Struct(ds /*DataStruct { + fields: syn::Fields::Named(ref fields), + ..
+ }*/) => , + Enum(ref e) => impl_wasmer_env_for_enum(struct_name, &e.variants, &input.attrs), + _ => abort_call_site!("Clap only supports non-tuple structs and enums"), + }*/ +} + +fn derive_struct_fields(data: &DataStruct) -> (TokenStream, TokenStream) { + let mut finish = vec![]; + let mut helpers = vec![]; + //let mut assign_tokens = vec![]; + let mut touched_fields = vec![]; + let fields: Vec<Field> = match &data.fields { + Fields::Named(ref fields) => fields.named.iter().cloned().collect(), + Fields::Unit => vec![], + Fields::Unnamed(fields) => fields.unnamed.iter().cloned().collect(), + }; + for (field_num, f) in fields.into_iter().enumerate() { + let field_idx = syn::Index::from(field_num); + let name = f.ident.clone(); + let top_level_ty: &Type = &f.ty; + touched_fields.push(name.clone()); + let mut wasmer_attr = None; + for attr in &f.attrs { + // if / filter + if attr.path.is_ident(&Ident::new("wasmer", attr.span())) { + let tokens = attr.tokens.clone(); + match syn::parse2(tokens) { + Ok(attr) => { + wasmer_attr = Some(attr); + break; + } + Err(e) => { + abort!(attr, "Failed to parse `wasmer` attribute: {}", e); + } + } + } + } + + if let Some(wasmer_attr) = wasmer_attr { + let inner_type = get_identifier(top_level_ty); + if let Some(name) = &name { + let name_ref_str = format!("{}_ref", name); + let name_ref = syn::Ident::new(&name_ref_str, name.span()); + let name_ref_unchecked_str = format!("{}_ref_unchecked", name); + let name_ref_unchecked = syn::Ident::new(&name_ref_unchecked_str, name.span()); + let helper_tokens = quote_spanned! {f.span()=> + /// Get access to the underlying data. + /// + /// If `WasmerEnv::finish` has been called, this function will never + /// return `None` unless the underlying data has been mutated manually. + pub fn #name_ref(&self) -> Option<&#inner_type> { + self.#name.get_ref() + } + /// Gets the item without checking if it's been initialized. + /// + /// # Safety + /// `WasmerEnv::finish` must have been called on this function or + /// this type manually initialized. + pub unsafe fn #name_ref_unchecked(&self) -> &#inner_type { + self.#name.get_unchecked() + } + }; + helpers.push(helper_tokens); + } + match wasmer_attr { + WasmerAttr::Export { + identifier, + optional, + aliases, + span, + } => { + let finish_tokens = if let Some(name) = name { + let name_str = name.to_string(); + let item_name = + identifier.unwrap_or_else(|| LitStr::new(&name_str, name.span())); + let mut access_expr = quote_spanned! { + f.span() => + instance.get_with_generics_weak::<#inner_type, _, _>(#item_name) + }; + for alias in aliases { + access_expr = quote_spanned! { + f.span()=> + #access_expr .or_else(|_| instance.get_with_generics_weak::<#inner_type, _, _>(#alias)) + }; + } + if optional { + quote_spanned! { + f.span()=> + match #access_expr { + Ok(#name) => { self.#name.initialize(#name); }, + Err(_) => (), + }; + } + } else { + quote_spanned! { + f.span()=> + let #name: #inner_type = #access_expr?; + self.#name.initialize(#name); + } + } + } else if let Some(identifier) = identifier { + let mut access_expr = quote_spanned! { + f.span() => + instance.get_with_generics_weak::<#inner_type, _, _>(#identifier) + }; + for alias in aliases { + access_expr = quote_spanned! { + f.span()=> + #access_expr .or_else(|_| instance.get_with_generics_weak::<#inner_type, _, _>(#alias)) + }; + } + let local_var = + Ident::new(&format!("field_{}", field_num), identifier.span()); + if optional { + quote_spanned!
{ + f.span()=> + match #access_expr { + Ok(#local_var) => { + self.#field_idx.initialize(#local_var); + }, + Err(_) => (), + } + } + } else { + quote_spanned! { + f.span()=> + let #local_var: #inner_type = #access_expr?; + self.#field_idx.initialize(#local_var); + } + } + } else { + abort!( + span, + "Expected `name` field on export attribute because field does not have a name. For example: `#[wasmer(export(name = \"wasm_ident\"))]`.", + ); + }; + + finish.push(finish_tokens); + } + } + } + } + + let trait_methods = quote! { + fn init_with_instance(&mut self, instance: &::wasmer::Instance) -> ::core::result::Result<(), ::wasmer::HostEnvInitError> { + #(#finish)* + Ok(()) + } + }; + + let helper_methods = quote! { + #(#helpers)* + }; + + (trait_methods, helper_methods) +} + +// TODO: name this something that makes sense +fn get_identifier(ty: &Type) -> TokenStream { + match ty { + Type::Path(TypePath { + path: Path { segments, .. }, + .. + }) => { + if let Some(PathSegment { ident, arguments }) = segments.last() { + if ident != "LazyInit" { + abort!( + ident, + "WasmerEnv derive expects all `export`s to be wrapped in `LazyInit`" + ); + } + if let PathArguments::AngleBracketed(AngleBracketedGenericArguments { + args, .. + }) = arguments + { + // TODO: proper error handling + assert_eq!(args.len(), 1); + if let GenericArgument::Type(Type::Path(TypePath { + path: Path { segments, .. }, + .. + })) = &args[0] + { + segments + .last() + .expect("there must be at least one segment; TODO: error handling") + .to_token_stream() + } else { + abort!( + &args[0], + "unrecognized type in first generic position on `LazyInit`" + ); + } + } else { + abort!(arguments, "Expected a generic parameter on `LazyInit`"); + } + } else { + abort!(segments, "Unknown type found"); + } + } + _ => abort!(ty, "Unrecognized/unsupported type"), + } +} diff --git a/lib/derive/src/parse.rs b/lib/derive/src/parse.rs new file mode 100644 index 0000000000..a8feb9e948 --- /dev/null +++ b/lib/derive/src/parse.rs @@ -0,0 +1,143 @@ +use proc_macro2::Span; +use proc_macro_error::abort; +use syn::{ + parenthesized, + parse::{Parse, ParseStream}, + token, Ident, LitBool, LitStr, Token, +}; + +pub enum WasmerAttr { + Export { + /// The identifier is an override, otherwise we use the field name as the name + /// to lookup in `instance.exports`. 
+ identifier: Option<LitStr>, + optional: bool, + aliases: Vec<LitStr>, + span: Span, + }, +} + +#[derive(Debug)] +struct ExportExpr { + name: Option<LitStr>, + optional: bool, + aliases: Vec<LitStr>, +} + +#[derive(Debug)] +struct ExportOptions { + name: Option<LitStr>, + optional: bool, + aliases: Vec<LitStr>, +} +impl Parse for ExportOptions { + fn parse(input: ParseStream<'_>) -> syn::Result<Self> { + let mut name = None; + let mut optional: bool = false; + let mut aliases: Vec<LitStr> = vec![]; + loop { + let ident = input.parse::<Ident>()?; + let _ = input.parse::<Token![=]>()?; + let ident_str = ident.to_string(); + + match ident_str.as_str() { + "name" => { + name = Some(input.parse::<LitStr>()?); + } + "optional" => { + optional = input.parse::<LitBool>()?.value; + } + "alias" => { + let alias = input.parse::<LitStr>()?; + aliases.push(alias); + } + otherwise => { + abort!( + ident, + "Unrecognized argument in export options: expected `name = \"string\"`, `optional = bool`, or `alias = \"string\"` found `{}`", + otherwise + ); + } + } + + match input.parse::<Token![,]>() { + Ok(_) => continue, + Err(_) => break, + } + } + + Ok(ExportOptions { + name, + optional, + aliases, + }) + } +} + +impl Parse for ExportExpr { + fn parse(input: ParseStream<'_>) -> syn::Result<Self> { + let name; + let optional; + let aliases; + if input.peek(Ident) { + let options = input.parse::<ExportOptions>()?; + name = options.name; + optional = options.optional; + aliases = options.aliases; + } else { + name = None; + optional = false; + aliases = vec![]; + } + Ok(Self { + name, + optional, + aliases, + }) + } +} + +// allows us to handle parens more cleanly +struct WasmerAttrInner(WasmerAttr); + +impl Parse for WasmerAttrInner { + fn parse(input: ParseStream<'_>) -> syn::Result<Self> { + let ident: Ident = input.parse()?; + let ident_str = ident.to_string(); + let span = ident.span(); + let out = match ident_str.as_str() { + "export" => { + let export_expr; + let (name, optional, aliases) = if input.peek(token::Paren) { + let _: token::Paren = parenthesized!(export_expr in input); + + let expr = export_expr.parse::<ExportExpr>()?; + (expr.name, expr.optional, expr.aliases) + } else { + (None, false, vec![]) + }; + + WasmerAttr::Export { + identifier: name, + optional, + aliases, + span, + } + } + otherwise => abort!( + ident, + "Unexpected identifier `{}`.
Expected `export`.", + otherwise + ), + }; + Ok(WasmerAttrInner(out)) + } +} + +impl Parse for WasmerAttr { + fn parse(input: ParseStream<'_>) -> syn::Result<Self> { + let attr_inner; + parenthesized!(attr_inner in input); + Ok(attr_inner.parse::<WasmerAttrInner>()?.0) + } +} diff --git a/lib/derive/tests/basic.rs b/lib/derive/tests/basic.rs new file mode 100644 index 0000000000..4302253eb4 --- /dev/null +++ b/lib/derive/tests/basic.rs @@ -0,0 +1,119 @@ +#![allow(dead_code)] + +use wasmer::{Function, Global, LazyInit, Memory, NativeFunc, Table, WasmerEnv}; + +#[derive(WasmerEnv, Clone)] +struct MyEnv { + num: u32, + nums: Vec<i32>, +} + +fn impls_wasmer_env<T: WasmerEnv>() -> bool { + true +} + +#[test] +fn test_derive() { + let _my_env = MyEnv { + num: 3, + nums: vec![1, 2, 3], + }; + assert!(impls_wasmer_env::<MyEnv>()); +} + +#[derive(WasmerEnv, Clone)] +struct MyEnvWithMemory { + num: u32, + nums: Vec<i32>, + #[wasmer(export)] + memory: LazyInit<Memory>, +} + +#[derive(WasmerEnv, Clone)] +struct MyEnvWithFuncs { + num: u32, + nums: Vec<i32>, + #[wasmer(export)] + memory: LazyInit<Memory>, + #[wasmer(export)] + sum: LazyInit<NativeFunc<(i32, i32), i32>>, +} + +#[derive(WasmerEnv, Clone)] +struct MyEnvWithEverything { + num: u32, + nums: Vec<i32>, + #[wasmer(export)] + memory: LazyInit<Memory>, + #[wasmer(export)] + sum: LazyInit<NativeFunc<(i32, i32), i32>>, + #[wasmer(export)] + multiply: LazyInit<Function>, + #[wasmer(export)] + counter: LazyInit<Global>, + #[wasmer(export)] + functions: LazyInit<Table>
, +} + +#[derive(WasmerEnv, Clone)] +struct MyEnvWithLifetime<'a> { + name: &'a str, + #[wasmer(export(name = "memory"))] + memory: LazyInit<Memory>, +} + +#[derive(WasmerEnv, Clone)] +struct MyUnitStruct; + +#[derive(WasmerEnv, Clone)] +struct MyTupleStruct(u32); + +#[derive(WasmerEnv, Clone)] +struct MyTupleStruct2(u32, u32); + +#[derive(WasmerEnv, Clone)] +struct MyTupleStructWithAttribute(#[wasmer(export(name = "memory"))] LazyInit<Memory>, u32); + +#[test] +fn test_derive_with_attribute() { + assert!(impls_wasmer_env::<MyEnvWithMemory>()); + assert!(impls_wasmer_env::<MyEnvWithFuncs>()); + assert!(impls_wasmer_env::<MyEnvWithEverything>()); + assert!(impls_wasmer_env::<MyEnvWithLifetime>()); + assert!(impls_wasmer_env::<MyUnitStruct>()); + assert!(impls_wasmer_env::<MyTupleStruct>()); + assert!(impls_wasmer_env::<MyTupleStruct2>()); + assert!(impls_wasmer_env::<MyTupleStructWithAttribute>()); +} + +#[derive(WasmerEnv, Clone)] +struct StructWithOptionalField { + #[wasmer(export(optional = true))] + memory: LazyInit<Memory>, + #[wasmer(export(optional = true, name = "real_memory"))] + memory2: LazyInit<Memory>, + #[wasmer(export(optional = false))] + memory3: LazyInit<Memory>, +} + +#[test] +fn test_derive_with_optional() { + assert!(impls_wasmer_env::<StructWithOptionalField>()); +} + +#[derive(WasmerEnv, Clone)] +struct StructWithAliases { + #[wasmer(export(alias = "_memory"))] + memory: LazyInit<Memory>, + #[wasmer(export(alias = "_real_memory", optional = true, name = "real_memory"))] + memory2: LazyInit<Memory>, + #[wasmer(export(alias = "_memory3", alias = "__memory3"))] + memory3: LazyInit<Memory>, + #[wasmer(export(alias = "_memory3", name = "memory4", alias = "__memory3"))] + memory4: LazyInit<Memory>, +} + +#[test] +fn test_derive_with_aliases() { + assert!(impls_wasmer_env::<StructWithAliases>()); +} diff --git a/lib/derive/tests/compile-fail/bad-attribute.rs b/lib/derive/tests/compile-fail/bad-attribute.rs new file mode 100644 index 0000000000..9766dd10c1 --- /dev/null +++ b/lib/derive/tests/compile-fail/bad-attribute.rs @@ -0,0 +1,11 @@ +extern crate wasmer; + +use wasmer::{LazyInit, Memory, WasmerEnv}; + +#[derive(WasmerEnv)] +struct BadAttribute { + #[wasmer(extraport)] //~ Unexpected identifier `extraport`. Expected `export`.
+ memory: LazyInit<Memory>, +} + +fn main() {} diff --git a/lib/derive/tests/compile-fail/bad-export-arg.rs b/lib/derive/tests/compile-fail/bad-export-arg.rs new file mode 100644 index 0000000000..e2e5e947df --- /dev/null +++ b/lib/derive/tests/compile-fail/bad-export-arg.rs @@ -0,0 +1,18 @@ +extern crate wasmer; + +use wasmer::{LazyInit, Memory, WasmerEnv}; + +#[derive(WasmerEnv)] +struct BadExportArg { + #[wasmer(export(this_is_not_a_real_argument = "hello, world"))] + //~ Unrecognized argument in export options: expected `name` found `this_is_not_a_real_argument + memory: LazyInit<Memory>, +} + +#[derive(WasmerEnv)] +struct BadExportArgRawString { + #[wasmer(export("hello"))] //~ Failed to parse `wasmer` attribute: unexpected token + memory: LazyInit<Memory>, +} + +fn main() {} diff --git a/lib/derive/tests/compile-fail/no-lazy-init.rs b/lib/derive/tests/compile-fail/no-lazy-init.rs new file mode 100644 index 0000000000..baeeb1e438 --- /dev/null +++ b/lib/derive/tests/compile-fail/no-lazy-init.rs @@ -0,0 +1,11 @@ +extern crate wasmer; + +use wasmer::{LazyInit, Memory, WasmerEnv}; + +#[derive(WasmerEnv)] +struct ExportNotWrappedInLazyInit { + #[wasmer(export)] + memory: Memory, //~ WasmerEnv derive expects all `export`s to be wrapped in `LazyInit` +} + +fn main() {} diff --git a/lib/derive/tests/compiletest.rs b/lib/derive/tests/compiletest.rs new file mode 100644 index 0000000000..ae91da95c9 --- /dev/null +++ b/lib/derive/tests/compiletest.rs @@ -0,0 +1,48 @@ +// file is a modified version of https://github.com/AltSysrq/proptest/blob/proptest-derive/proptest-derive/tests/compiletest.rs + +// Original copyright and license: +// Copyright 2018 The proptest developers +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Modifications copyright 2020 Wasmer +// Licensed under the MIT license + +extern crate compiletest_rs as ct; + +use std::env; + +fn run_mode(src: &'static str, mode: &'static str) { + let mut config = ct::Config::default(); + + config.mode = mode.parse().expect("invalid mode"); + config.target_rustcflags = Some("-L ../../target/debug/deps".to_owned()); + if let Ok(name) = env::var("TESTNAME") { + config.filters.push(name); + } + config.src_base = format!("tests/{}", src).into(); + + // hack to make this work on OSX: we probably don't need it though + /*if std::env::var("DYLD_LIBRARY_PATH").is_err() { + let val = std::env::var("DYLD_FALLBACK_LIBRARY_PATH").unwrap(); + std::env::set_var("DYLD_LIBRARY_PATH", val); + } + config.link_deps();*/ + + // Uncomment this if you have the "multiple crates named `wasmer` issue". Massively slows + // down test iteration though...
+ config.clean_rmeta(); + + ct::run_tests(&config); +} + +#[test] +#[ignore] // ignored by default because it needs to essentially run `cargo clean` to work correctly + // and that's really, really slow +fn compile_test() { + run_mode("compile-fail", "compile-fail"); +} diff --git a/lib/engine-universal/Cargo.toml b/lib/engine-universal/Cargo.toml index 6b11aa667d..39ca47e480 100644 --- a/lib/engine-universal/Cargo.toml +++ b/lib/engine-universal/Cargo.toml @@ -14,22 +14,17 @@ edition = "2018" name = "wasmer_engine_universal" [dependencies] -finite-wasm = "0.3.0" -wasmer-compiler = { path = "../compiler", version = "=2.4.0", package = "wasmer-compiler-near", features = ["translator"] } -wasmer-engine = { path = "../engine", package = "wasmer-engine-near", version = "=2.4.0" } wasmer-types = { path = "../types", version = "=2.4.0", package = "wasmer-types-near" } +wasmer-compiler = { path = "../compiler", version = "=2.4.0", package = "wasmer-compiler-near", features = ["translator"] } wasmer-vm = { path = "../vm", version = "=2.4.0", package = "wasmer-vm-near" } - -cfg-if = "1.0" -enumset = "1.0" +wasmer-engine = { path = "../engine", package = "wasmer-engine-near", version = "=2.4.0" } # flexbuffers = { path = "../../../flatbuffers/rust/flexbuffers", version = "0.1.0" } -leb128 = "0.2" -prefix-sum-vec = "0.1.1" region = "3.0" +cfg-if = "1.0" +leb128 = "0.2" rkyv = "0.7.31" +enumset = "1.0" thiserror = "1" -tracing = "0.1" -wasmparser = "0.99.0" [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3", features = ["winnt", "impl-default"] } diff --git a/lib/engine-universal/src/engine.rs b/lib/engine-universal/src/engine.rs index cc09653729..180ab26b30 100644 --- a/lib/engine-universal/src/engine.rs +++ b/lib/engine-universal/src/engine.rs @@ -89,19 +89,11 @@ impl UniversalEngine { /// Compile a WebAssembly binary #[cfg(feature = "compiler")] - #[tracing::instrument(skip_all)] pub fn compile_universal( &self, binary: &[u8], tunables: &dyn Tunables, ) -> Result<crate::UniversalExecutable, CompileError> { - // Compute the needed instrumentation - let instrumentation = finite_wasm::Analysis::new() - .with_stack(tunables.stack_limiter_cfg()) - .with_gas(tunables.gas_cfg()) - .analyze(binary) - .map_err(CompileError::Analyze)?; - let inner_engine = self.inner_mut(); let features = inner_engine.features(); let compiler = inner_engine.compiler()?; @@ -128,14 +120,7 @@ impl UniversalEngine { memory_styles, table_styles, }; - let wasmer_compiler::Compilation { - functions, - custom_sections, - function_call_trampolines, - dynamic_function_trampolines, - debug, - trampolines, - } = compiler.compile_module( + let compilation = compiler.compile_module( &self.target(), &compile_info, // SAFETY: Calling `unwrap` is correct since // `module_translation_state`.
translation.module_translation_state.as_ref().unwrap(), translation.function_body_inputs, - tunables, - &instrumentation, )?; + let function_call_trampolines = compilation.get_function_call_trampolines(); + let dynamic_function_trampolines = compilation.get_dynamic_function_trampolines(); let data_initializers = translation .data_initializers .iter() .map(wasmer_types::OwnedDataInitializer::new) .collect(); - let mut function_frame_info = PrimaryMap::with_capacity(functions.len()); - let mut function_bodies = PrimaryMap::with_capacity(functions.len()); - let mut function_relocations = PrimaryMap::with_capacity(functions.len()); - let mut function_jt_offsets = PrimaryMap::with_capacity(functions.len()); - for (_, func) in functions.into_iter() { - function_bodies.push(func.body); - function_relocations.push(func.relocations); - function_jt_offsets.push(func.jt_offsets); - function_frame_info.push(func.frame_info); - } - let custom_section_relocations = custom_sections - .iter() - .map(|(_, section)| section.relocations.clone()) - .collect::<PrimaryMap<SectionIndex, _>>(); + + let frame_infos = compilation.get_frame_info(); Ok(crate::UniversalExecutable { - function_bodies, - function_relocations, - function_jt_offsets, - function_frame_info, + function_bodies: compilation.get_function_bodies(), + function_relocations: compilation.get_relocations(), + function_jt_offsets: compilation.get_jt_offsets(), + function_frame_info: frame_infos, function_call_trampolines, dynamic_function_trampolines, - custom_sections, - custom_section_relocations, - debug, - trampolines, + custom_sections: compilation.get_custom_sections(), + custom_section_relocations: compilation.get_custom_section_relocations(), + debug: compilation.get_debug(), + trampolines: compilation.get_trampolines(), compile_info, data_initializers, cpu_features: self.target().cpu_features().as_u64(), @@ -183,7 +156,6 @@ impl UniversalEngine { } /// Load a [`UniversalExecutable`](crate::UniversalExecutable) with this engine.
- #[tracing::instrument(skip_all)] pub fn load_universal_executable( &self, executable: &UniversalExecutable, @@ -220,7 +192,7 @@ impl UniversalEngine { let signatures = module .signatures .iter() - .map(|(_, sig)| inner_engine.signatures.register(sig.clone())) + .map(|(_, sig)| inner_engine.signatures.register(sig.into())) .collect::<PrimaryMap<SignatureIndex, _>>() .into_boxed_slice(); let (functions, trampolines, dynamic_trampolines, custom_sections) = inner_engine @@ -361,12 +333,7 @@ impl UniversalEngine { let signatures = module .signatures .values() - .map(|sig| { - let sig_ref = FunctionTypeRef::from(sig); - inner_engine - .signatures - .register(FunctionType::new(sig_ref.params(), sig_ref.results())) - }) + .map(|sig| inner_engine.signatures.register(sig.into())) .collect::<PrimaryMap<SignatureIndex, _>>() .into_boxed_slice(); let (functions, trampolines, dynamic_trampolines, custom_sections) = inner_engine @@ -468,7 +435,7 @@ impl Engine for UniversalEngine { } /// Register a signature - fn register_signature(&self, func_type: FunctionType) -> VMSharedSignatureIndex { + fn register_signature(&self, func_type: FunctionTypeRef<'_>) -> VMSharedSignatureIndex { self.inner().signatures.register(func_type) } @@ -482,7 +449,6 @@ impl Engine for UniversalEngine { } /// Validates a WebAssembly module - #[tracing::instrument(skip_all)] fn validate(&self, binary: &[u8]) -> Result<(), CompileError> { self.inner().validate(binary) } @@ -501,7 +467,6 @@ impl Engine for UniversalEngine { /// Compile a WebAssembly binary #[cfg(feature = "compiler")] - #[tracing::instrument(skip_all)] fn compile( &self, binary: &[u8], @@ -511,7 +476,6 @@ ... .map(|ex| Box::new(ex) as _) } - #[tracing::instrument(skip_all)] fn load( &self, executable: &(dyn wasmer_engine::Executable), diff --git a/lib/engine-universal/src/lib.rs b/lib/engine-universal/src/lib.rs index 2a2d41f434..f4a2d8ebaa 100644 --- a/lib/engine-universal/src/lib.rs +++ b/lib/engine-universal/src/lib.rs @@ -6,6 +6,7 @@ #![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)] #![warn(unused_import_braces)] +#![warn(unsafe_op_in_unsafe_fn)] #![cfg_attr( feature = "cargo-clippy", allow(clippy::new_without_default, clippy::new_without_default) ) diff --git a/lib/engine-universal/src/link.rs b/lib/engine-universal/src/link.rs index e4d96ec44b..5cee1d8d2f 100644 --- a/lib/engine-universal/src/link.rs +++ b/lib/engine-universal/src/link.rs @@ -168,7 +168,6 @@ fn apply_relocation( /// Links a module, patching the allocated functions with the /// required relocations and jump tables.
-#[tracing::instrument(skip_all)] pub fn link_module( allocated_functions: &PrimaryMap<LocalFunctionIndex, FunctionExtent>, jt_offsets: impl Fn(LocalFunctionIndex, JumpTable) -> wasmer_compiler::CodeOffset, diff --git a/lib/engine/Cargo.toml b/lib/engine/Cargo.toml index e7d4e65b5b..876c3ba187 100644 --- a/lib/engine/Cargo.toml +++ b/lib/engine/Cargo.toml @@ -14,7 +14,6 @@ edition = "2018" name = "wasmer_engine" [dependencies] -finite-wasm = "0.3.0" wasmer-types = { path = "../types", version = "=2.4.0", package = "wasmer-types-near" } wasmer-compiler = { path = "../compiler", version = "=2.4.0", package = "wasmer-compiler-near" } wasmer-vm = { path = "../vm", version = "=2.4.0", package = "wasmer-vm-near" } diff --git a/lib/engine/src/engine.rs b/lib/engine/src/engine.rs index dfa3570db2..2e4e48ffde 100644 --- a/lib/engine/src/engine.rs +++ b/lib/engine/src/engine.rs @@ -3,7 +3,7 @@ use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use std::sync::Arc; use wasmer_compiler::{CompileError, Target}; -use wasmer_types::FunctionType; +use wasmer_types::{FunctionType, FunctionTypeRef}; use wasmer_vm::{Artifact, Tunables, VMCallerCheckedAnyfunc, VMFuncRef, VMSharedSignatureIndex}; mod private { @@ -21,7 +21,7 @@ pub trait Engine { fn target(&self) -> &Target; /// Register a signature - fn register_signature(&self, func_type: FunctionType) -> VMSharedSignatureIndex; + fn register_signature(&self, func_type: FunctionTypeRef<'_>) -> VMSharedSignatureIndex; /// Register a function's data. fn register_function_metadata(&self, func_data: VMCallerCheckedAnyfunc) -> VMFuncRef; diff --git a/lib/types/Cargo.toml b/lib/types/Cargo.toml index 36132ecc8b..0999a0aba2 100644 --- a/lib/types/Cargo.toml +++ b/lib/types/Cargo.toml @@ -16,17 +16,12 @@ name = "wasmer_types" [dependencies] thiserror = "1.0" indexmap = { version = "1.6" } -num-traits = "0.2.15" rkyv = { version = "0.7.20" } -[dev-dependencies] -bolero = "0.6.0" - [features] default = ["std"] std = [] core = [] -[[test]] -name = "partial-sum-map" -harness = false +# experimental / in-development features +experimental-reference-types-extern-ref = [] diff --git a/lib/types/src/extern_ref.rs b/lib/types/src/extern_ref.rs index fc14459fa3..85494baf6a 100644 --- a/lib/types/src/extern_ref.rs +++ b/lib/types/src/extern_ref.rs @@ -252,6 +252,7 @@ impl ExternRef { } } + #[cfg(feature = "experimental-reference-types-extern-ref")] /// Make a new extern reference pub fn new<T>(value: T) -> Self where T: Any + Send + Sync + 'static + Sized, { Self { inner: VMExternRef::new(value), } } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + /// Try to downcast to the given value + pub fn downcast<T>(&self) -> Option<&T> + where + T: Any + Send + Sync + 'static + Sized, + { + self.inner.downcast::<T>() + } + + #[cfg(feature = "experimental-reference-types-extern-ref")] + /// Get the number of strong references to this data.
+ pub fn strong_count(&self) -> usize { + self.inner.strong_count() + } } impl From<VMExternRef> for ExternRef { diff --git a/lib/types/src/features.rs b/lib/types/src/features.rs index 9a6de02813..66d997dd8b 100644 --- a/lib/types/src/features.rs +++ b/lib/types/src/features.rs @@ -16,18 +16,14 @@ pub struct Features { pub multi_value: bool, /// Tail call proposal should be enabled pub tail_call: bool, + /// Module Linking proposal should be enabled + pub module_linking: bool, /// Multi Memory proposal should be enabled pub multi_memory: bool, /// 64-bit Memory proposal should be enabled pub memory64: bool, /// Wasm exceptions proposal should be enabled pub exceptions: bool, - /// Mutable global proposal should be enabled - pub mutable_global: bool, - /// Non-trapping float-to-int proposal should be enabled - pub saturating_float_to_int: bool, - /// Sign-extension operators should be enabled - pub sign_extension: bool, } impl Features { @@ -44,13 +40,10 @@ impl Features { // Multivalue should be on by default multi_value: true, tail_call: false, + module_linking: false, multi_memory: false, memory64: false, exceptions: false, - // these were once defaulting to true in wasmparser, we now set them to true here - mutable_global: true, - saturating_float_to_int: true, - sign_extension: true, } } @@ -170,6 +163,25 @@ impl Features { self } + /// Configures whether the WebAssembly module linking proposal will + /// be enabled. + /// + /// The [WebAssembly module linking proposal][proposal] is not + /// currently fully standardized and is undergoing development. + /// Support for this feature can be enabled through this method for + /// appropriate WebAssembly modules. + /// + /// This feature allows WebAssembly modules to define, import and + /// export modules and instances. + /// + /// This is `false` by default. + /// + /// [proposal]: https://github.com/webassembly/module-linking + pub fn module_linking(&mut self, enable: bool) -> &mut Self { + self.module_linking = enable; + self + } + /// Configures whether the WebAssembly multi-memory proposal will /// be enabled. /// @@ -230,12 +242,10 @@ mod test_features { bulk_memory: true, multi_value: true, tail_call: false, + module_linking: false, multi_memory: false, memory64: false, exceptions: false, - mutable_global: true, - saturating_float_to_int: true, - sign_extension: true, } ); } @@ -295,6 +305,13 @@ mod test_features { assert!(features.tail_call); } + #[test] + fn enable_module_linking() { + let mut features = Features::new(); + features.module_linking(true); + assert!(features.module_linking); + } + #[test] fn enable_multi_memory() { let mut features = Features::new(); diff --git a/lib/types/src/lib.rs b/lib/types/src/lib.rs index 75f8260424..bdccbe188b 100644 --- a/lib/types/src/lib.rs +++ b/lib/types/src/lib.rs @@ -62,7 +62,6 @@ mod initializers; mod memory_view; mod module; mod native; -pub mod partial_sum_map; mod types; mod units; mod values; diff --git a/lib/types/src/partial_sum_map.rs b/lib/types/src/partial_sum_map.rs deleted file mode 100644 index be8695c006..0000000000 --- a/lib/types/src/partial_sum_map.rs +++ /dev/null @@ -1,167 +0,0 @@ -//! Partial sum maps -//! -//! These maps allow you to efficiently store repeating sequences of a value. An example of such -//! sequence could be the list of locals for a webassembly function. -//! -//! Considering the locals example above, it might be represented as a `u32` partial sum of the -//! local’s index. The locals between the index of the previous element and the current element -//!
have the `WpType` type. So, given -//! -//! (0, u32), (10, u64), (15, f64) -//! -//! then the 0th local would be a u32, locals `1..=10` – u64 and locals `11..=15` – f64. -//! -//! The type of a given index can be quickly found with a binary search over the partial sum -//! field. - -/// A Map from keys to values that is able to efficiently store repeating occurrences of the value. -/// -/// This map can only be appended to. -#[derive(Debug)] -pub struct PartialSumMap<K, V> { - /// Keys between ((keys[n-1] + 1) or 0) and keys[n] (both included) have value values[n] - keys: Vec<K>, - values: Vec<V>, - size: K, -} - -impl<K: num_traits::PrimInt, V> PartialSumMap<K, V> { - /// Create a new `PartialSumMap`. - /// - /// Does not allocate. - pub fn new() -> Self { - Self { - keys: vec![], - values: vec![], - size: K::zero(), - } - } - - /// Push `count` number of `value`s. - /// - /// `O(1)` amortized. - pub fn push(&mut self, count: K, value: V) -> Result<(), Error> { - if count != K::zero() { - self.size = self.size.checked_add(&count).ok_or(Error::Overflow)?; - self.keys.push(self.size.clone() - K::one()); - self.values.push(value); - } - Ok(()) - } - - /// Get the current maximum index that can be used with `find` for this map. - /// - /// Will return `None` if there are no elements in this map yet. - /// - /// `O(1)` - pub fn max_index(&self) -> Option<K> { - self.keys.last().cloned() - } - - /// Get the current (virtual) size of this map. This is the sum of all `count` arguments passed to `push` until now. - /// - /// Note that the result can be greater than `usize::MAX` if e.g. `K` is a BigInt type. Cast at your own risk. - /// - /// `O(1)` - pub fn size(&self) -> &K { - &self.size - } - - /// Find the value by the index. - /// - /// This is an `O(log n)` operation. - pub fn find(&self, index: K) -> Option<&V> { - match self.keys.binary_search(&index) { - // If this index would be inserted at the end of the list, then the - // index is out of bounds and we return a None. - // - // If `Ok` is returned we found the index exactly, or if `Err` is - // returned the position is the one which is the least index - // greater than `idx`, which is still the type of `idx` according - // to our "compressed" representation. In both cases we access the - // list at index `i`. - Ok(i) | Err(i) => self.values.get(i), - } - } -} - -/// Errors that occur when using PartialSumMap. -#[derive(Debug, PartialEq, Eq)] -pub enum Error { - /// The partial sum has overflowed.
- Overflow, -} - -impl std::error::Error for Error {} -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(match self { - Self::Overflow => "partial sum overflow", - }) - } -} - -#[cfg(test)] -mod tests { - use super::{Error, PartialSumMap}; - - #[test] - fn empty_partial_map() { - let map = PartialSumMap::<u32, u32>::new(); - assert_eq!(None, map.find(0)); - assert_eq!(0, *map.size()); - } - - #[test] - fn basic_function() { - let mut map = PartialSumMap::<u32, u32>::new(); - assert_eq!(None, map.max_index()); - assert_eq!(0, *map.size()); - for i in 0..10 { - map.push(1, i).unwrap(); - assert_eq!(Some(i), map.max_index()); - assert_eq!(i + 1, *map.size()); - } - for i in 0..10 { - assert_eq!(Some(&i), map.find(i)); - } - assert_eq!(None, map.find(10)); - assert_eq!(None, map.find(0xFFFF_FFFF)); - } - - #[test] - fn zero_count() { - let mut map = PartialSumMap::<u32, u32>::new(); - assert_eq!(Ok(()), map.push(0, 0)); - assert_eq!(None, map.max_index()); - assert_eq!(0, *map.size()); - assert_eq!(Ok(()), map.push(10, 42)); - assert_eq!(Some(9), map.max_index()); - assert_eq!(10, *map.size()); - assert_eq!(Ok(()), map.push(0, 43)); - assert_eq!(Some(9), map.max_index()); - assert_eq!(10, *map.size()); - } - - #[test] - fn close_to_limit() { - let mut map = PartialSumMap::<u32, u32>::new(); - assert_eq!(Ok(()), map.push(0xFFFF_FFFE, 42)); // we added values 0..=0xFFFF_FFFD - assert_eq!(Some(&42), map.find(0xFFFF_FFFD)); - assert_eq!(None, map.find(0xFFFF_FFFE)); - - assert_eq!(Err(Error::Overflow), map.push(100, 93)); // overflowing does not change the map - assert_eq!(Some(&42), map.find(0xFFFF_FFFD)); - assert_eq!(None, map.find(0xFFFF_FFFE)); - - assert_eq!(Ok(()), map.push(1, 322)); // we added value at index 0xFFFF_FFFE (which is the 0xFFFF_FFFFth value) - assert_eq!(Some(&42), map.find(0xFFFF_FFFD)); - assert_eq!(Some(&322), map.find(0xFFFF_FFFE)); - assert_eq!(None, map.find(0xFFFF_FFFF)); - - assert_eq!(Err(Error::Overflow), map.push(1, 1234)); // can't add any more stuff... - assert_eq!(Some(&42), map.find(0xFFFF_FFFD)); - assert_eq!(Some(&322), map.find(0xFFFF_FFFE)); - assert_eq!(None, map.find(0xFFFF_FFFF)); - } -} diff --git a/lib/types/src/types.rs b/lib/types/src/types.rs index f645156ced..4ed63c576f 100644 --- a/lib/types/src/types.rs +++ b/lib/types/src/types.rs @@ -585,14 +585,17 @@ pub struct FastGasCounter { pub burnt_gas: u64, /// Hard gas limit for execution pub gas_limit: u64, + /// Single WASM opcode cost + pub opcode_cost: u64, } impl FastGasCounter { /// New fast gas counter. - pub fn new(limit: u64) -> Self { + pub fn new(limit: u64, opcode: u64) -> Self { FastGasCounter { burnt_gas: 0, gas_limit: limit, + opcode_cost: opcode, } } /// Amount of gas burnt, maybe load as atomic to avoid aliasing issues. @@ -603,7 +606,13 @@ impl fmt::Display for FastGasCounter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "burnt: {} limit: {} ", self.burnt(), self.gas_limit,) + write!( + f, + "burnt: {} limit: {} op_cost: {} ", + self.burnt(), + self.gas_limit, + self.opcode_cost + ) } } @@ -617,8 +626,8 @@ pub struct InstanceConfig { pub stack_limit: i32, } -// Default stack limit, in bytes. -const DEFAULT_STACK_LIMIT: i32 = 1024 * 1024; +// Default stack limit, in 8-byte stack slots. +const DEFAULT_STACK_LIMIT: i32 = 100 * 1024; impl InstanceConfig { /// Create default instance configuration.
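For reference, a minimal sketch of how the widened `FastGasCounter` constructor from the hunk above is used after this change. The numbers are illustrative; `burnt()` and the `Display` output format are as shown in the diff:

```rust
use wasmer_types::FastGasCounter;

fn gas_counter_example() {
    // A fresh counter: nothing burnt yet, a hard gas limit, and a flat
    // per-opcode cost that instrumented code charges against the limit.
    let counter = FastGasCounter::new(1_000_000, 3);
    assert_eq!(counter.burnt(), 0);
    assert_eq!(counter.gas_limit, 1_000_000);
    assert_eq!(counter.opcode_cost, 3);
    // Matches the `Display` impl above (note the trailing space).
    assert_eq!(counter.to_string(), "burnt: 0 limit: 1000000 op_cost: 3 ");
}
```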
@@ -626,6 +635,7 @@ impl InstanceConfig { let result = Rc::new(UnsafeCell::new(FastGasCounter { burnt_gas: 0, gas_limit: u64::MAX, + opcode_cost: 0, })); Self { gas_counter: result.get(), diff --git a/lib/types/tests/partial-sum-map/corpus/.gitkeep b/lib/types/tests/partial-sum-map/corpus/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/lib/types/tests/partial-sum-map/crashes/.gitkeep b/lib/types/tests/partial-sum-map/crashes/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/lib/types/tests/partial-sum-map/main.rs b/lib/types/tests/partial-sum-map/main.rs deleted file mode 100644 index 4006946afb..0000000000 --- a/lib/types/tests/partial-sum-map/main.rs +++ /dev/null @@ -1,32 +0,0 @@ -use wasmer_types::partial_sum_map::{Error, PartialSumMap}; - -fn main() { - bolero::check!() - .with_type::<(Vec<(u32, u32)>, Vec<u32>)>() - .for_each(|input| { - let adds = &input.0; - let tests = &input.1; - let mut psm = PartialSumMap::new(); - let mut v = Vec::new(); - let mut size: u32 = 0; - for a in adds { - let push_res = psm.push(a.0, a.1); - match size.checked_add(a.0) { - None => { - assert_eq!(push_res, Err(Error::Overflow)); - continue; - } - Some(new_size) => { - assert_eq!(push_res, Ok(())); - size = new_size; - } - } - v.push(((size - a.0)..size, a.1)); - } - for t in tests { - let psm_answer = psm.find(*t); - let iset_answer = v.iter().find(|(r, _)| r.contains(&t)).map(|(_, v)| v); - assert_eq!(psm_answer, iset_answer); - } - }); -} diff --git a/lib/vm/Cargo.toml b/lib/vm/Cargo.toml index a7b652fa4f..6925c70f5f 100644 --- a/lib/vm/Cargo.toml +++ b/lib/vm/Cargo.toml @@ -14,19 +14,16 @@ edition = "2018" name = "wasmer_vm" [dependencies] -backtrace = "0.3" -cfg-if = "1.0" -finite-wasm = "0.3.0" -indexmap = { version = "1.6" } +wasmer-types = { path = "../types", package = "wasmer-types-near", version = "=2.4.0" } +region = "3.0" libc = { version = "^0.2", default-features = false } memoffset = "0.6" +indexmap = { version = "1.6" } +thiserror = "1.0" more-asserts = "0.2" -region = "3.0" +cfg-if = "1.0" +backtrace = "0.3" rkyv = { version = "0.7.20" } -thiserror = "1.0" -tracing = "0.1" -wasmer-types = { path = "../types", package = "wasmer-types-near", version = "=2.4.0" } -wasmparser = "0.99.0" [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3", features = ["winbase", "memoryapi", "errhandlingapi"] } diff --git a/lib/vm/src/instance/allocator.rs b/lib/vm/src/instance/allocator.rs index b192e40f56..83e3445c19 100644 --- a/lib/vm/src/instance/allocator.rs +++ b/lib/vm/src/instance/allocator.rs @@ -129,7 +129,7 @@ impl InstanceAllocator { /// memory, i.e. `Self.instance_ptr` must have been allocated by /// `Self::new`. unsafe fn memory_definition_locations(&self) -> Vec<NonNull<VMMemoryDefinition>> { - let num_memories = self.offsets.num_local_memories(); + let num_memories = self.offsets.num_local_memories; let num_memories = usize::try_from(num_memories).unwrap(); let mut out = Vec::with_capacity(num_memories); @@ -163,7 +163,7 @@ impl InstanceAllocator { /// memory, i.e. `Self.instance_ptr` must have been allocated by /// `Self::new`.
unsafe fn table_definition_locations(&self) -> Vec<NonNull<VMTableDefinition>> { - let num_tables = self.offsets.num_local_tables(); + let num_tables = self.offsets.num_local_tables; let num_tables = usize::try_from(num_tables).unwrap(); let mut out = Vec::with_capacity(num_tables); diff --git a/lib/vm/src/instance/mod.rs b/lib/vm/src/instance/mod.rs index c25809ca28..2b272650e7 100644 --- a/lib/vm/src/instance/mod.rs +++ b/lib/vm/src/instance/mod.rs @@ -59,7 +59,7 @@ pub type ImportInitializerFuncPtr = /// to ensure that the `vmctx` field is last. See the documentation of /// the `vmctx` field to learn more. #[repr(C)] -pub struct Instance { +pub(crate) struct Instance { pub(crate) artifact: Arc<dyn Artifact>, /// External configuration for instance. @@ -227,7 +227,15 @@ impl Instance { /// Return the indexed `VMMemoryImport`. fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport { let index = usize::try_from(index.as_u32()).unwrap(); - unsafe { &*self.imported_memories_ptr().add(index) } + let addr = unsafe { self.imported_memories_ptr().add(index) }; + let align = std::mem::align_of::<VMMemoryImport>(); + debug_assert!( + addr as usize % align == 0, + "VMMemoryImport addr is not aligned to {}: {:p}", + align, + addr + ); + unsafe { &*addr } } /// Return a pointer to the `VMMemoryImport`s. @@ -386,7 +394,7 @@ impl Instance { result } - pub fn reset_stack_meter(&self) { + fn reset_stack_meter(&self) { unsafe { *(self.stack_limit_ptr()) = *(self.stack_limit_initial_ptr()); } @@ -946,7 +954,7 @@ impl InstanceHandle { } /// Return a reference to the contained `Instance`. - pub fn instance(&self) -> &InstanceRef { + pub(crate) fn instance(&self) -> &InstanceRef { &self.instance } @@ -1172,7 +1180,6 @@ impl InstanceHandle { /// visible to code in `wasmer_vm`, so it's the caller's responsibility to ensure these /// functions are called with the correct type. /// - `instance_ptr` must point to a valid `wasmer::Instance`. -#[tracing::instrument(skip_all)] pub unsafe fn initialize_host_envs( handle: &std::sync::Mutex<InstanceHandle>, instance_ptr: *const ffi::c_void, diff --git a/lib/vm/src/instance/ref.rs b/lib/vm/src/instance/ref.rs index f2d0f20712..fa8a1a2b6a 100644 --- a/lib/vm/src/instance/ref.rs +++ b/lib/vm/src/instance/ref.rs @@ -122,7 +122,7 @@ impl InstanceRef { /// Get a reference to the `Instance`. #[inline] - pub fn as_ref(&self) -> &Instance { + pub(crate) fn as_ref(&self) -> &Instance { (&*self.0).as_ref() } diff --git a/lib/vm/src/lib.rs b/lib/vm/src/lib.rs index 8bd07ce0fd..1317f270d6 100644 --- a/lib/vm/src/lib.rs +++ b/lib/vm/src/lib.rs @@ -59,7 +59,7 @@ pub use crate::resolver::{ pub use crate::sig_registry::{SignatureRegistry, VMSharedSignatureIndex}; pub use crate::table::{LinearTable, Table, TableElement, TableStyle}; pub use crate::trap::*; -pub use crate::tunables::{TestTunables, Tunables}; +pub use crate::tunables::Tunables; pub use crate::vmcontext::{ FunctionBodyPtr, FunctionExtent, SectionBodyPtr, VMBuiltinFunctionIndex, VMCallerCheckedAnyfunc, VMContext, VMDynamicFunctionContext, VMFunctionBody, diff --git a/lib/vm/src/sig_registry.rs b/lib/vm/src/sig_registry.rs index 2f8920e381..de0cc352ac 100644 --- a/lib/vm/src/sig_registry.rs +++ b/lib/vm/src/sig_registry.rs @@ -6,7 +6,7 @@ use std::collections::{hash_map, HashMap}; use std::convert::TryFrom; -use wasmer_types::FunctionType; +use wasmer_types::{FunctionType, FunctionTypeRef}; /// An index into the shared signature registry, usable for checking signatures /// at indirect calls.
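The `imported_memory` change above pairs a raw pointer offset with an alignment `debug_assert!`. A standalone sketch of the same checked-access pattern, using a plain `u64` array rather than `VMMemoryImport` purely for illustration:

```rust
use std::mem::align_of;

/// Return a reference to the `index`-th element of `items` via raw-pointer
/// arithmetic, asserting in debug builds that the computed address is aligned.
fn checked_elem(items: &[u64], index: usize) -> &u64 {
    assert!(index < items.len());
    let addr = unsafe { items.as_ptr().add(index) };
    let align = align_of::<u64>();
    debug_assert!(
        addr as usize % align == 0,
        "addr is not aligned to {}: {:p}",
        align,
        addr
    );
    unsafe { &*addr }
}

fn main() {
    let xs = [1u64, 2, 3];
    assert_eq!(*checked_elem(&xs, 2), 3);
}
```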
@@ -41,15 +41,16 @@ impl SignatureRegistry { } /// Register a signature and return its unique index. - pub fn register(&mut self, sig: FunctionType) -> VMSharedSignatureIndex { + pub fn register(&mut self, sig: FunctionTypeRef<'_>) -> VMSharedSignatureIndex { let len = self.index_to_data.len(); - // TODO(0-copy): this. should. not. allocate. (and take FunctionTypeRef as a parameter) + // TODO(0-copy): this. should. not. allocate. // // This is pretty hard to avoid, however. In order to implement bijective map, we'd want // a `Rc<FunctionType>`, but indexing into a map keyed by `Rc<FunctionType>` with // `FunctionTypeRef` is… not possible given the current API either. // // Consider `transmute` or `hashbrown`'s raw_entry. + let sig = FunctionType::new(sig.params(), sig.results()); match self.type_to_index.entry(sig.clone()) { hash_map::Entry::Occupied(entry) => *entry.get(), hash_map::Entry::Vacant(entry) => { diff --git a/lib/vm/src/tunables.rs b/lib/vm/src/tunables.rs index 0c5db975f3..c049b770c5 100644 --- a/lib/vm/src/tunables.rs +++ b/lib/vm/src/tunables.rs @@ -8,7 +8,7 @@ use wasmer_types::{MemoryType, TableType}; /// An engine delegates the creation of memories, tables, and globals /// to a foreign implementor of this trait. -pub trait Tunables: Sync { +pub trait Tunables { /// Construct a `MemoryStyle` for the provided `MemoryType` fn memory_style(&self, memory: &MemoryType) -> MemoryStyle; @@ -50,72 +50,4 @@ pub trait Tunables { style: &TableStyle, vm_definition_location: NonNull<VMTableDefinition>, ) -> Result<std::sync::Arc<dyn Table>, String>; - - /// Instrumentation configuration: stack limiter config - fn stack_limiter_cfg(&self) -> Box<dyn finite_wasm::max_stack::SizeConfig>; - - /// Instrumentation configuration: gas accounting config - fn gas_cfg(&self) -> Box<dyn finite_wasm::wasmparser::VisitOperator<'static, Output = u64>>; - - /// Cost for initializing a stack frame - fn stack_init_gas_cost(&self, frame_size: u64) -> u64; -} - -#[doc(hidden)] -pub struct TestTunables; - -impl Tunables for TestTunables { - fn memory_style(&self, _memory: &MemoryType) -> MemoryStyle { - unimplemented!() - } - - fn table_style(&self, _table: &TableType) -> TableStyle { - unimplemented!() - } - - fn create_host_memory( - &self, - _ty: &MemoryType, - _style: &MemoryStyle, - ) -> Result<std::sync::Arc<dyn Memory>, MemoryError> { - unimplemented!() - } - - unsafe fn create_vm_memory( - &self, - _ty: &MemoryType, - _style: &MemoryStyle, - _vm_definition_location: NonNull<VMMemoryDefinition>, - ) -> Result<std::sync::Arc<dyn Memory>, MemoryError> { - unimplemented!() - } - - fn create_host_table( - &self, - _ty: &TableType, - _style: &TableStyle, - ) -> Result<std::sync::Arc<dyn Table>, String> { - unimplemented!() - } - - unsafe fn create_vm_table( - &self, - _ty: &TableType, - _style: &TableStyle, - _vm_definition_location: NonNull<VMTableDefinition>, - ) -> Result<std::sync::Arc<dyn Table>, String> { - unimplemented!() - } - - fn stack_limiter_cfg(&self) -> Box<dyn finite_wasm::max_stack::SizeConfig> { - unimplemented!() - } - - fn gas_cfg(&self) -> Box<dyn finite_wasm::wasmparser::VisitOperator<'static, Output = u64>> { - unimplemented!() - } - - fn stack_init_gas_cost(&self, _frame_size: u64) -> u64 { - unimplemented!() - } -} diff --git a/lib/vm/src/vmcontext.rs b/lib/vm/src/vmcontext.rs index 66c700ae7d..5eaa15ee82 100644 --- a/lib/vm/src/vmcontext.rs +++ b/lib/vm/src/vmcontext.rs @@ -1107,7 +1107,7 @@ impl VMContext { /// be a `VMContext` allocated as part of an `Instance`.
#[allow(clippy::cast_ptr_alignment)] #[inline] - pub unsafe fn instance(&self) -> &Instance { + pub(crate) unsafe fn instance(&self) -> &Instance { &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *const Instance) } diff --git a/lib/vm/src/vmoffsets.rs b/lib/vm/src/vmoffsets.rs index 74329b895b..ce5e10edad 100644 --- a/lib/vm/src/vmoffsets.rs +++ b/lib/vm/src/vmoffsets.rs @@ -9,6 +9,7 @@ use crate::VMBuiltinFunctionIndex; use more_asserts::assert_lt; use std::convert::TryFrom; +use std::mem::align_of; use wasmer_types::{ FunctionIndex, GlobalIndex, LocalGlobalIndex, LocalMemoryIndex, LocalTableIndex, MemoryIndex, ModuleInfo, SignatureIndex, TableIndex, @@ -24,7 +25,6 @@ fn cast_to_u32(sz: usize) -> u32 { } /// Align an offset used in this module to a specific byte-width by rounding up -#[inline] const fn align(offset: u32, width: u32) -> u32 { (offset + (width - 1)) / width * width } @@ -33,44 +33,28 @@ const fn align(offset: u32, width: u32) -> u32 { /// related structs that JIT code accesses directly. /// /// [`VMContext`]: crate::vmcontext::VMContext -// Invariant: the addresses always fit into an u32 without overflowing #[derive(Clone, Debug)] pub struct VMOffsets { /// The size in bytes of a pointer on the target. - pointer_size: u8, + pub pointer_size: u8, /// The number of signature declarations in the module. - num_signature_ids: u32, + pub num_signature_ids: u32, /// The number of imported functions in the module. - num_imported_functions: u32, + pub num_imported_functions: u32, /// The number of imported tables in the module. - num_imported_tables: u32, + pub num_imported_tables: u32, /// The number of imported memories in the module. - num_imported_memories: u32, + pub num_imported_memories: u32, /// The number of imported globals in the module. - num_imported_globals: u32, + pub num_imported_globals: u32, /// The number of defined tables in the module. - num_local_tables: u32, + pub num_local_tables: u32, /// The number of defined memories in the module. - num_local_memories: u32, + pub num_local_memories: u32, /// The number of defined globals in the module. - num_local_globals: u32, + pub num_local_globals: u32, /// If the module has trap handler. - has_trap_handlers: bool, - - vmctx_signature_ids_begin: u32, - vmctx_imported_functions_begin: u32, - vmctx_imported_tables_begin: u32, - vmctx_imported_memories_begin: u32, - vmctx_imported_globals_begin: u32, - vmctx_tables_begin: u32, - vmctx_memories_begin: u32, - vmctx_globals_begin: u32, - vmctx_builtin_functions_begin: u32, - vmctx_trap_handler_begin: u32, - vmctx_gas_limiter_pointer: u32, - vmctx_stack_limit_begin: u32, - vmctx_stack_limit_initial_begin: u32, - size_of_vmctx: u32, + pub has_trap_handlers: bool, } impl VMOffsets { @@ -90,20 +74,6 @@ impl VMOffsets { num_local_memories: 0, num_local_globals: 0, has_trap_handlers: false, - vmctx_signature_ids_begin: 0, - vmctx_imported_functions_begin: 0, - vmctx_imported_tables_begin: 0, - vmctx_imported_memories_begin: 0, - vmctx_imported_globals_begin: 0, - vmctx_tables_begin: 0, - vmctx_memories_begin: 0, - vmctx_globals_begin: 0, - vmctx_builtin_functions_begin: 0, - vmctx_trap_handler_begin: 0, - vmctx_gas_limiter_pointer: 0, - vmctx_stack_limit_begin: 0, - vmctx_stack_limit_initial_begin: 0, - size_of_vmctx: 0, } } @@ -116,7 +86,6 @@ impl VMOffsets { } /// Add imports and locals from the provided ModuleInfo. 
- #[tracing::instrument(skip_all)] pub fn with_module_info(mut self, module: &ModuleInfo) -> Self { self.num_imported_functions = module.import_counts.functions; self.num_imported_tables = module.import_counts.tables; @@ -128,7 +97,6 @@ impl VMOffsets { self.num_local_memories = cast_to_u32(module.memories.len()); self.num_local_globals = cast_to_u32(module.globals.len()); self.has_trap_handlers = true; - self.precompute(); self } @@ -144,89 +112,8 @@ impl VMOffsets { self.num_local_memories = cast_to_u32(module.memories.len()); self.num_local_globals = cast_to_u32(module.globals.len()); self.has_trap_handlers = true; - self.precompute(); self } - - /// Number of local tables defined in the module - pub fn num_local_tables(&self) -> u32 { - self.num_local_tables - } - - /// Number of local memories defined in the module - pub fn num_local_memories(&self) -> u32 { - self.num_local_memories - } - - fn precompute(&mut self) { - /// Offset base by num_items items of size item_size, panicking on overflow - fn offset_by(base: u32, num_items: u32, item_size: u32) -> u32 { - base.checked_add(num_items.checked_mul(item_size).unwrap()) - .unwrap() - } - - self.vmctx_signature_ids_begin = 0; - self.vmctx_imported_functions_begin = offset_by( - self.vmctx_signature_ids_begin, - self.num_signature_ids, - u32::from(self.size_of_vmshared_signature_index()), - ); - self.vmctx_imported_tables_begin = offset_by( - self.vmctx_imported_functions_begin, - self.num_imported_functions, - u32::from(self.size_of_vmfunction_import()), - ); - self.vmctx_imported_memories_begin = offset_by( - self.vmctx_imported_tables_begin, - self.num_imported_tables, - u32::from(self.size_of_vmtable_import()), - ); - self.vmctx_imported_globals_begin = offset_by( - self.vmctx_imported_memories_begin, - self.num_imported_memories, - u32::from(self.size_of_vmmemory_import()), - ); - self.vmctx_tables_begin = offset_by( - self.vmctx_imported_globals_begin, - self.num_imported_globals, - u32::from(self.size_of_vmglobal_import()), - ); - self.vmctx_memories_begin = offset_by( - self.vmctx_tables_begin, - self.num_local_tables, - u32::from(self.size_of_vmtable_definition()), - ); - self.vmctx_globals_begin = align( - offset_by( - self.vmctx_memories_begin, - self.num_local_memories, - u32::from(self.size_of_vmmemory_definition()), - ), - 16, - ); - self.vmctx_builtin_functions_begin = offset_by( - self.vmctx_globals_begin, - self.num_local_globals, - u32::from(self.size_of_vmglobal_local()), - ); - self.vmctx_trap_handler_begin = offset_by( - self.vmctx_builtin_functions_begin, - VMBuiltinFunctionIndex::builtin_functions_total_number(), - u32::from(self.pointer_size), - ); - self.vmctx_gas_limiter_pointer = offset_by( - self.vmctx_trap_handler_begin, - if self.has_trap_handlers { 1 } else { 0 }, - u32::from(self.pointer_size), - ); - self.vmctx_stack_limit_begin = offset_by( - self.vmctx_gas_limiter_pointer, - 1, - u32::from(self.pointer_size), - ); - self.vmctx_stack_limit_initial_begin = self.vmctx_stack_limit_begin.checked_add(4).unwrap(); - self.size_of_vmctx = self.vmctx_stack_limit_begin.checked_add(4).unwrap(); - } } /// Offsets for [`VMFunctionImport`]. @@ -491,247 +378,373 @@ impl VMOffsets { } } +/// Offset base by num_items items of size item_size, panicking on overflow +fn offset_by(base: u32, num_items: u32, prev_item_size: u32, next_item_align: usize) -> u32 { + align( + base.checked_add(num_items.checked_mul(prev_item_size).unwrap()) + .unwrap(), + next_item_align as u32, + ) +} + /// Offsets for [`VMContext`]. 
 /// Offsets for [`VMContext`].
 ///
 /// [`VMContext`]: crate::vmcontext::VMContext
 impl VMOffsets {
     /// The offset of the `signature_ids` array.
-    #[inline]
     pub fn vmctx_signature_ids_begin(&self) -> u32 {
-        self.vmctx_signature_ids_begin
+        0
     }

     /// The offset of the `imported_functions` array.
+    #[allow(clippy::erasing_op)]
     pub fn vmctx_imported_functions_begin(&self) -> u32 {
-        self.vmctx_imported_functions_begin
+        offset_by(
+            self.vmctx_signature_ids_begin(),
+            self.num_signature_ids,
+            u32::from(self.size_of_vmshared_signature_index()),
+            align_of::<VMFunctionImport>(),
+        )
     }

     /// The offset of the `imported_tables` array.
     #[allow(clippy::identity_op)]
     pub fn vmctx_imported_tables_begin(&self) -> u32 {
-        self.vmctx_imported_tables_begin
+        offset_by(
+            self.vmctx_imported_functions_begin(),
+            self.num_imported_functions,
+            u32::from(self.size_of_vmfunction_import()),
+            align_of::<VMTableImport>(),
+        )
     }

     /// The offset of the `imported_memories` array.
     pub fn vmctx_imported_memories_begin(&self) -> u32 {
-        self.vmctx_imported_memories_begin
+        offset_by(
+            self.vmctx_imported_tables_begin(),
+            self.num_imported_tables,
+            u32::from(self.size_of_vmtable_import()),
+            align_of::<VMMemoryImport>(),
+        )
     }

     /// The offset of the `imported_globals` array.
     pub fn vmctx_imported_globals_begin(&self) -> u32 {
-        self.vmctx_imported_globals_begin
+        offset_by(
+            self.vmctx_imported_memories_begin(),
+            self.num_imported_memories,
+            u32::from(self.size_of_vmmemory_import()),
+            align_of::<VMGlobalImport>(),
+        )
     }

     /// The offset of the `tables` array.
     pub fn vmctx_tables_begin(&self) -> u32 {
-        self.vmctx_tables_begin
+        offset_by(
+            self.vmctx_imported_globals_begin(),
+            self.num_imported_globals,
+            u32::from(self.size_of_vmglobal_import()),
+            align_of::<VMTableDefinition>(),
+        )
     }

     /// The offset of the `memories` array.
     pub fn vmctx_memories_begin(&self) -> u32 {
-        self.vmctx_memories_begin
+        offset_by(
+            self.vmctx_tables_begin(),
+            self.num_local_tables,
+            u32::from(self.size_of_vmtable_definition()),
+            align_of::<VMMemoryDefinition>(),
+        )
     }

     /// The offset of the `globals` array.
     pub fn vmctx_globals_begin(&self) -> u32 {
-        self.vmctx_globals_begin
+        offset_by(
+            self.vmctx_memories_begin(),
+            self.num_local_memories,
+            u32::from(self.size_of_vmmemory_definition()),
+            align_of::<VMGlobalDefinition>(),
+        )
     }

     /// The offset of the builtin functions array.
     pub fn vmctx_builtin_functions_begin(&self) -> u32 {
-        self.vmctx_builtin_functions_begin
+        offset_by(
+            self.vmctx_globals_begin(),
+            self.num_local_globals,
+            u32::from(self.size_of_vmglobal_local()),
+            align_of::<usize>(),
+        )
     }

     /// The offset of the trap handler.
     pub fn vmctx_trap_handler_begin(&self) -> u32 {
-        self.vmctx_trap_handler_begin
+        offset_by(
+            self.vmctx_builtin_functions_begin(),
+            VMBuiltinFunctionIndex::builtin_functions_total_number(),
+            u32::from(self.pointer_size),
+            align_of::<usize>(),
+        )
     }

     /// The offset of the gas limiter pointer.
     pub fn vmctx_gas_limiter_pointer(&self) -> u32 {
-        self.vmctx_gas_limiter_pointer
+        offset_by(
+            self.vmctx_trap_handler_begin(),
+            if self.has_trap_handlers { 1 } else { 0 },
+            u32::from(self.pointer_size),
+            align_of::<*mut wasmer_types::FastGasCounter>(),
+        )
     }

     /// The offset of the current stack limit.
     pub fn vmctx_stack_limit_begin(&self) -> u32 {
-        self.vmctx_stack_limit_begin
+        offset_by(
+            self.vmctx_gas_limiter_pointer(),
+            1,
+            u32::from(self.pointer_size),
+            align_of::<u32>(),
+        )
     }

     /// The offset of the initial stack limit.
     pub fn vmctx_stack_limit_initial_begin(&self) -> u32 {
-        self.vmctx_stack_limit_initial_begin
+        self.vmctx_stack_limit_begin().checked_add(4).unwrap()
     }

     /// Return the size of the [`VMContext`] allocation.
/// /// [`VMContext`]: crate::vmcontext::VMContext pub fn size_of_vmctx(&self) -> u32 { - self.size_of_vmctx + self.vmctx_stack_limit_initial_begin() + .checked_add(4) + .unwrap() } /// Return the offset to [`VMSharedSignatureIndex`] index `index`. /// /// [`VMSharedSignatureIndex`]: crate::vmcontext::VMSharedSignatureIndex - // Remember updating precompute upon changes pub fn vmctx_vmshared_signature_id(&self, index: SignatureIndex) -> u32 { assert_lt!(index.as_u32(), self.num_signature_ids); - self.vmctx_signature_ids_begin - + index.as_u32() * u32::from(self.size_of_vmshared_signature_index()) + self.vmctx_signature_ids_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmshared_signature_index())) + .unwrap(), + ) + .unwrap() } /// Return the offset to [`VMFunctionImport`] index `index`. /// /// [`VMFunctionImport`]: crate::vmcontext::VMFunctionImport - // Remember updating precompute upon changes pub fn vmctx_vmfunction_import(&self, index: FunctionIndex) -> u32 { assert_lt!(index.as_u32(), self.num_imported_functions); - self.vmctx_imported_functions_begin - + index.as_u32() * u32::from(self.size_of_vmfunction_import()) + self.vmctx_imported_functions_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmfunction_import())) + .unwrap(), + ) + .unwrap() } /// Return the offset to [`VMTableImport`] index `index`. /// /// [`VMTableImport`]: crate::vmcontext::VMTableImport - // Remember updating precompute upon changes pub fn vmctx_vmtable_import(&self, index: TableIndex) -> u32 { assert_lt!(index.as_u32(), self.num_imported_tables); - self.vmctx_imported_tables_begin + index.as_u32() * u32::from(self.size_of_vmtable_import()) + self.vmctx_imported_tables_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmtable_import())) + .unwrap(), + ) + .unwrap() } /// Return the offset to [`VMMemoryImport`] index `index`. /// /// [`VMMemoryImport`]: crate::vmcontext::VMMemoryImport - // Remember updating precompute upon changes pub fn vmctx_vmmemory_import(&self, index: MemoryIndex) -> u32 { assert_lt!(index.as_u32(), self.num_imported_memories); - self.vmctx_imported_memories_begin - + index.as_u32() * u32::from(self.size_of_vmmemory_import()) + self.vmctx_imported_memories_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmmemory_import())) + .unwrap(), + ) + .unwrap() } /// Return the offset to [`VMGlobalImport`] index `index`. /// /// [`VMGlobalImport`]: crate::vmcontext::VMGlobalImport - // Remember updating precompute upon changes pub fn vmctx_vmglobal_import(&self, index: GlobalIndex) -> u32 { assert_lt!(index.as_u32(), self.num_imported_globals); - self.vmctx_imported_globals_begin - + index.as_u32() * u32::from(self.size_of_vmglobal_import()) + self.vmctx_imported_globals_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmglobal_import())) + .unwrap(), + ) + .unwrap() } /// Return the offset to [`VMTableDefinition`] index `index`. 
/// /// [`VMTableDefinition`]: crate::vmcontext::VMTableDefinition - // Remember updating precompute upon changes pub fn vmctx_vmtable_definition(&self, index: LocalTableIndex) -> u32 { assert_lt!(index.as_u32(), self.num_local_tables); - self.vmctx_tables_begin + index.as_u32() * u32::from(self.size_of_vmtable_definition()) + self.vmctx_tables_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmtable_definition())) + .unwrap(), + ) + .unwrap() } /// Return the offset to [`VMMemoryDefinition`] index `index`. /// /// [`VMMemoryDefinition`]: crate::vmcontext::VMMemoryDefinition - // Remember updating precompute upon changes pub fn vmctx_vmmemory_definition(&self, index: LocalMemoryIndex) -> u32 { assert_lt!(index.as_u32(), self.num_local_memories); - self.vmctx_memories_begin + index.as_u32() * u32::from(self.size_of_vmmemory_definition()) + self.vmctx_memories_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmmemory_definition())) + .unwrap(), + ) + .unwrap() } /// Return the offset to the [`VMGlobalDefinition`] index `index`. /// /// [`VMGlobalDefinition`]: crate::vmcontext::VMGlobalDefinition - // Remember updating precompute upon changes pub fn vmctx_vmglobal_definition(&self, index: LocalGlobalIndex) -> u32 { assert_lt!(index.as_u32(), self.num_local_globals); - self.vmctx_globals_begin + index.as_u32() * u32::from(self.size_of_vmglobal_local()) + self.vmctx_globals_begin() + .checked_add( + index + .as_u32() + .checked_mul(u32::from(self.size_of_vmglobal_local())) + .unwrap(), + ) + .unwrap() } /// Return the offset to the `body` field in `*const VMFunctionBody` index `index`. - // Remember updating precompute upon changes pub fn vmctx_vmfunction_import_body(&self, index: FunctionIndex) -> u32 { - self.vmctx_vmfunction_import(index) + u32::from(self.vmfunction_import_body()) + self.vmctx_vmfunction_import(index) + .checked_add(u32::from(self.vmfunction_import_body())) + .unwrap() } /// Return the offset to the `vmctx` field in `*const VMFunctionBody` index `index`. - // Remember updating precompute upon changes pub fn vmctx_vmfunction_import_vmctx(&self, index: FunctionIndex) -> u32 { - self.vmctx_vmfunction_import(index) + u32::from(self.vmfunction_import_vmctx()) + self.vmctx_vmfunction_import(index) + .checked_add(u32::from(self.vmfunction_import_vmctx())) + .unwrap() } /// Return the offset to the `definition` field in [`VMTableImport`] index `index`. /// /// [`VMTableImport`]: crate::vmcontext::VMTableImport - // Remember updating precompute upon changes pub fn vmctx_vmtable_import_definition(&self, index: TableIndex) -> u32 { - self.vmctx_vmtable_import(index) + u32::from(self.vmtable_import_definition()) + self.vmctx_vmtable_import(index) + .checked_add(u32::from(self.vmtable_import_definition())) + .unwrap() } /// Return the offset to the `base` field in [`VMTableDefinition`] index `index`. /// /// [`VMTableDefinition`]: crate::vmcontext::VMTableDefinition - // Remember updating precompute upon changes pub fn vmctx_vmtable_definition_base(&self, index: LocalTableIndex) -> u32 { - self.vmctx_vmtable_definition(index) + u32::from(self.vmtable_definition_base()) + self.vmctx_vmtable_definition(index) + .checked_add(u32::from(self.vmtable_definition_base())) + .unwrap() } /// Return the offset to the `current_elements` field in [`VMTableDefinition`] index `index`. 
 ///
 /// [`VMTableDefinition`]: crate::vmcontext::VMTableDefinition
-    // Remember updating precompute upon changes
     pub fn vmctx_vmtable_definition_current_elements(&self, index: LocalTableIndex) -> u32 {
-        self.vmctx_vmtable_definition(index) + u32::from(self.vmtable_definition_current_elements())
+        self.vmctx_vmtable_definition(index)
+            .checked_add(u32::from(self.vmtable_definition_current_elements()))
+            .unwrap()
     }

     /// Return the offset to the `definition` field in [`VMMemoryImport`] index `index`.
     ///
     /// [`VMMemoryImport`]: crate::vmcontext::VMMemoryImport
-    // Remember updating precompute upon changes
     pub fn vmctx_vmmemory_import_definition(&self, index: MemoryIndex) -> u32 {
-        self.vmctx_vmmemory_import(index) + u32::from(self.vmmemory_import_definition())
+        self.vmctx_vmmemory_import(index)
+            .checked_add(u32::from(self.vmmemory_import_definition()))
+            .unwrap()
     }

     /// Return the offset to the `from` field in [`VMMemoryImport`] index `index`.
     ///
     /// [`VMMemoryImport`]: crate::vmcontext::VMMemoryImport
-    // Remember updating precompute upon changes
     pub fn vmctx_vmmemory_import_from(&self, index: MemoryIndex) -> u32 {
-        self.vmctx_vmmemory_import(index) + u32::from(self.vmmemory_import_from())
+        self.vmctx_vmmemory_import(index)
+            .checked_add(u32::from(self.vmmemory_import_from()))
+            .unwrap()
     }

     /// Return the offset to the `base` field in [`VMMemoryDefinition`] index `index`.
     ///
     /// [`VMMemoryDefinition`]: crate::vmcontext::VMMemoryDefinition
-    // Remember updating precompute upon changes
     pub fn vmctx_vmmemory_definition_base(&self, index: LocalMemoryIndex) -> u32 {
-        self.vmctx_vmmemory_definition(index) + u32::from(self.vmmemory_definition_base())
+        self.vmctx_vmmemory_definition(index)
+            .checked_add(u32::from(self.vmmemory_definition_base()))
+            .unwrap()
     }

     /// Return the offset to the `current_length` field in [`VMMemoryDefinition`] index `index`.
     ///
     /// [`VMMemoryDefinition`]: crate::vmcontext::VMMemoryDefinition
-    // Remember updating precompute upon changes
     pub fn vmctx_vmmemory_definition_current_length(&self, index: LocalMemoryIndex) -> u32 {
-        self.vmctx_vmmemory_definition(index) + u32::from(self.vmmemory_definition_current_length())
+        self.vmctx_vmmemory_definition(index)
+            .checked_add(u32::from(self.vmmemory_definition_current_length()))
+            .unwrap()
     }

     /// Return the offset to the `definition` field in [`VMGlobalImport`] index `index`.
     ///
     /// [`VMGlobalImport`]: crate::vmcontext::VMGlobalImport
-    // Remember updating precompute upon changes
     pub fn vmctx_vmglobal_import_definition(&self, index: GlobalIndex) -> u32 {
-        self.vmctx_vmglobal_import(index) + u32::from(self.vmglobal_import_definition())
+        self.vmctx_vmglobal_import(index)
+            .checked_add(u32::from(self.vmglobal_import_definition()))
+            .unwrap()
     }

     /// Return the offset to builtin function in `VMBuiltinFunctionsArray` index `index`.
-    // Remember updating precompute upon changes
     pub fn vmctx_builtin_function(&self, index: VMBuiltinFunctionIndex) -> u32 {
-        self.vmctx_builtin_functions_begin + index.index() * u32::from(self.pointer_size)
+        self.vmctx_builtin_functions_begin()
+            .checked_add(
+                index
+                    .index()
+                    .checked_mul(u32::from(self.pointer_size))
+                    .unwrap(),
+            )
+            .unwrap()
     }
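All of these accessors share one overflow-checked pattern; in miniature (an illustrative sketch only):

    // base + index * item_size, panicking instead of silently wrapping:
    fn checked_offset(base: u32, index: u32, item_size: u32) -> u32 {
        base.checked_add(index.checked_mul(item_size).unwrap()).unwrap()
    }
    // checked_offset(8, 3, 4) == 20, while checked_offset(u32::MAX, 1, 1)
    // panics, preserving the invariant that every offset fits in a u32.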
     /// Return the offset to the trap handler.
     pub fn vmctx_trap_handler(&self) -> u32 {
         // Ensure that we never ask for the trap handler offset if it's not enabled.
         assert!(self.has_trap_handlers);
-        self.vmctx_trap_handler_begin
+        self.vmctx_trap_handler_begin()
     }
 }
diff --git a/rust-toolchain b/rust-toolchain
index 9cf4011bde..e01e6c121d 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-1.66
+1.56
diff --git a/scripts/publish.py b/scripts/publish.py
index b5e53eaccd..d38dc8d81f 100644
--- a/scripts/publish.py
+++ b/scripts/publish.py
@@ -26,28 +26,42 @@
 # TODO: generate this by parsing toml files
 dep_graph = {
     "wasmer-types": set([]),
+    "wasmer-derive": set([]),
     "wasmer-vm": set(["wasmer-types"]),
     "wasmer-compiler": set(["wasmer-vm", "wasmer-types"]),
     "wasmer-object": set(["wasmer-types", "wasmer-compiler"]),
     "wasmer-engine": set(["wasmer-types", "wasmer-vm", "wasmer-compiler"]),
     "wasmer-compiler-singlepass": set(["wasmer-types", "wasmer-vm", "wasmer-compiler"]),
+    "wasmer-compiler-cranelift": set(["wasmer-types", "wasmer-vm", "wasmer-compiler"]),
+    "wasmer-compiler-llvm": set(["wasmer-types", "wasmer-vm", "wasmer-compiler"]),
     "wasmer-engine-universal": set(["wasmer-types", "wasmer-vm", "wasmer-compiler", "wasmer-engine"]),
-    "wasmer": set(["wasmer-vm", "wasmer-compiler-singlepass",
-                   "wasmer-compiler", "wasmer-engine", "wasmer-engine-universal",
-                   "wasmer-types"]),
+    "wasmer-engine-dylib": set(["wasmer-types", "wasmer-vm", "wasmer-compiler", "wasmer-engine",
+                                "wasmer-object"]),
+    "wasmer-engine-staticlib": set(["wasmer-types", "wasmer-vm", "wasmer-compiler", "wasmer-engine",
+                                    "wasmer-object"]),
+    "wasmer": set(["wasmer-vm", "wasmer-compiler-singlepass", "wasmer-compiler-cranelift",
+                   "wasmer-compiler-llvm", "wasmer-compiler", "wasmer-engine", "wasmer-engine-universal",
+                   "wasmer-engine-dylib", "wasmer-engine-staticlib", "wasmer-types", "wasmer-derive"]),
+    "wasmer-cache": set(["wasmer"]),
 }

 # where each crate is located in the `lib` directory
 # TODO: this could also be generated from the toml files
 location = {
     "wasmer-types": "types",
+    "wasmer-derive": "derive",
     "wasmer-vm": "vm",
     "wasmer-compiler": "compiler",
     "wasmer-object": "object",
     "wasmer-engine": "engine",
     "wasmer-compiler-singlepass": "compiler-singlepass",
+    "wasmer-compiler-cranelift": "compiler-cranelift",
+    "wasmer-compiler-llvm": "compiler-llvm",
     "wasmer-engine": "engine",
     "wasmer-engine-universal": "engine-universal",
+    "wasmer-engine-dylib": "engine-dylib",
+    "wasmer-engine-staticlib": "engine-staticlib",
+    "wasmer-cache": "cache",
     "wasmer": "api",
 }
diff --git a/scripts/update-version.sh b/scripts/update-version.sh
index 24a0cb50f9..8e28cd484b 100755
--- a/scripts/update-version.sh
+++ b/scripts/update-version.sh
@@ -18,6 +18,8 @@ echo "manually check changes to wasmer.iss"
 ## wasmer-types
 ## win-exception-handler
 ## compiler
+## compiler-cranelift
+## compiler-llvm
 ## compiler-singlepass
 ## emscripten
 ## wasi
diff --git a/tests/compilers/config.rs b/tests/compilers/config.rs
index a5e3b5d77c..7c00f818f3 100644
--- a/tests/compilers/config.rs
+++ b/tests/compilers/config.rs
@@ -2,6 +2,8 @@ use wasmer::{CompilerConfig, Engine as WasmerEngine, Features, Store};

 #[derive(Clone, Debug, PartialEq)]
 pub enum Compiler {
+    LLVM,
+    Cranelift,
     Singlepass,
 }

@@ -52,6 +54,14 @@ impl Config {
         #[cfg(not(feature = "engine"))]
         compile_error!("Please enable at least one engine via the features");
         match &self.engine {
+            #[cfg(feature = "dylib")]
+            Engine::Dylib => {
+                let mut engine = wasmer_engine_dylib::Dylib::new(compiler_config);
+                if let Some(ref features) = self.features {
+                    engine = engine.features(features.clone())
+                }
+                Box::new(engine.engine())
+            }
"universal")] Engine::Universal => { let mut engine = wasmer_engine_universal::Universal::new(compiler_config); @@ -70,6 +80,8 @@ impl Config { pub fn engine_headless(&self) -> Box { match &self.engine { + #[cfg(feature = "dylib")] + Engine::Dylib => Box::new(wasmer_engine_dylib::Dylib::headless().engine()), #[cfg(feature = "universal")] Engine::Universal => Box::new(wasmer_engine_universal::Universal::headless().engine()), #[allow(unreachable_patterns)] @@ -82,6 +94,20 @@ impl Config { pub fn compiler_config(&self, canonicalize_nans: bool) -> Box { match &self.compiler { + #[cfg(feature = "cranelift")] + Compiler::Cranelift => { + let mut compiler = wasmer_compiler_cranelift::Cranelift::new(); + compiler.canonicalize_nans(canonicalize_nans); + compiler.enable_verifier(); + Box::new(compiler) + } + #[cfg(feature = "llvm")] + Compiler::LLVM => { + let mut compiler = wasmer_compiler_llvm::LLVM::new(); + compiler.canonicalize_nans(canonicalize_nans); + compiler.enable_verifier(); + Box::new(compiler) + } #[cfg(feature = "singlepass")] Compiler::Singlepass => { let mut compiler = wasmer_compiler_singlepass::Singlepass::new(); diff --git a/tests/compilers/fast_gas_metering.rs b/tests/compilers/fast_gas_metering.rs index 76e531bba0..5f75ce94e2 100644 --- a/tests/compilers/fast_gas_metering.rs +++ b/tests/compilers/fast_gas_metering.rs @@ -51,14 +51,14 @@ fn get_module(store: &Store) -> Module { call 2 call 0 ) - (func (export "bar") (local i32 i32) + (func (export "bar") call 0 i32.const 100 call 2 ) (func (export "zoo") loop - i32.const 1_000_000_000 + i32.const 100 call 2 br 0 end @@ -92,19 +92,16 @@ fn get_module_tricky_arg(store: &Store) -> Module { Module::new(&store, &wat).unwrap() } -fn get_store(regular_op_cost: u64) -> Store { +fn get_store() -> Store { let compiler = Singlepass::default(); - let engine = Universal::new(compiler).engine(); - let mut tunables = BaseTunables::for_target(engine.target()); - tunables.set_regular_op_cost(regular_op_cost); - let store = Store::new_with_tunables(&engine, tunables); + let store = Store::new(&Universal::new(compiler).engine()); store } #[test] fn test_gas_intrinsic_in_start() { - let store = get_store(0); - let mut gas_counter = FastGasCounter::new(100); + let store = get_store(); + let mut gas_counter = FastGasCounter::new(300, 3); let module = get_module_with_start(&store); static HITS: AtomicUsize = AtomicUsize::new(0); let result = Instance::new_with_config( @@ -134,33 +131,29 @@ fn test_gas_intrinsic_in_start() { // Ensure "func" was called twice. assert_eq!(HITS.swap(0, SeqCst), 2); // Ensure gas was partially spent. - assert_eq!(gas_counter.burnt(), 142); - assert_eq!(gas_counter.gas_limit, 100); + assert_eq!(gas_counter.burnt(), 426); + assert_eq!(gas_counter.gas_limit, 300); + assert_eq!(gas_counter.opcode_cost, 3); } -fn test_gas_regular(opcode_cost: u64) { - let store = get_store(opcode_cost); - let mut gas_counter = FastGasCounter::new(200 + 11 * opcode_cost); +#[test] +fn test_gas_intrinsic_regular() { + let store = get_store(); + let mut gas_counter = FastGasCounter::new(500, 3); let module = get_module(&store); - let hits = std::sync::Arc::new(AtomicUsize::new(0)); + static HITS: AtomicUsize = AtomicUsize::new(0); let instance = Instance::new_with_config( &module, unsafe { InstanceConfig::default().with_counter(ptr::addr_of_mut!(gas_counter)) }, &imports! 
{ "host" => { - "func" => Function::new(&store, FunctionType::new(vec![], vec![]), { - let hits = hits.clone(); - move |_values| { - hits.fetch_add(1, SeqCst); - Ok(vec![]) - } + "func" => Function::new(&store, FunctionType::new(vec![], vec![]), |_values| { + HITS.fetch_add(1, SeqCst); + Ok(vec![]) }), - "has" => Function::new(&store, FunctionType::new(vec![ValType::I32], vec![]), { - let hits = hits.clone(); - move |_| { - hits.fetch_add(1, SeqCst); - Ok(vec![]) - } + "has" => Function::new(&store, FunctionType::new(vec![ValType::I32], vec![]), |_| { + HITS.fetch_add(1, SeqCst); + Ok(vec![]) }), "gas" => Function::new(&store, FunctionType::new(vec![ValType::I32], vec![]), |_| { // It shall be never called, as call is intrinsified. @@ -182,37 +175,26 @@ fn test_gas_regular(opcode_cost: u64) { .lookup_function("zoo") .expect("expected function zoo"); // Ensure "func" was not called. - assert_eq!(hits.load(SeqCst), 0); + assert_eq!(HITS.load(SeqCst), 0); let e = bar_func.call(&[]); assert!(e.is_ok()); // Ensure "func" was called. - assert_eq!(hits.load(SeqCst), 1); - assert_eq!(gas_counter.burnt(), 100 + 3 * opcode_cost); + assert_eq!(HITS.load(SeqCst), 1); + assert_eq!(gas_counter.burnt(), 300); let _e = foo_func.call(&[]).err().expect("error calling function"); // Ensure "func" and "has" was called again. - assert_eq!(hits.load(SeqCst), 4); - assert_eq!(gas_counter.burnt(), 242 + 11 * opcode_cost); - // Finally try to exhaust rather large limit - if opcode_cost == 0 { - gas_counter.gas_limit = 1_000_000_000_000_000; - let _e = zoo_func.call(&[]).err().expect("error calling function"); - assert_eq!(gas_counter.burnt(), 1_000_000_000_000_242); - } -} - -#[test] -fn test_gas_intrinsic_regular() { - test_gas_regular(0); -} - -#[test] -fn test_gas_accounting_regular() { - test_gas_regular(3); + assert_eq!(HITS.load(SeqCst), 4); + assert_eq!(gas_counter.burnt(), 726); + // Finally try to exhaust rather large limit. 
+    gas_counter.gas_limit = 10_000_000_000_000_000;
+    gas_counter.opcode_cost = 100_000_000;
+    let _e = zoo_func.call(&[]).err().expect("error calling function");
+    assert_eq!(gas_counter.burnt(), 10_000_000_000_000_726);
 }

 #[test]
 fn test_gas_intrinsic_default() {
-    let store = get_store(0);
+    let store = get_store();
     let module = get_module(&store);
     static HITS: AtomicUsize = AtomicUsize::new(0);
     let instance = Instance::new(
@@ -256,7 +238,7 @@ fn test_gas_intrinsic_default() {

 #[test]
 fn test_gas_intrinsic_tricky() {
-    let store = get_store(0);
+    let store = get_store();
     let module = get_module_tricky_arg(&store);
     static BURNT_GAS: AtomicUsize = AtomicUsize::new(0);
     static HITS: AtomicUsize = AtomicUsize::new(0);
diff --git a/tests/compilers/imports.rs b/tests/compilers/imports.rs
index c856127866..40e49287f6 100644
--- a/tests/compilers/imports.rs
+++ b/tests/compilers/imports.rs
@@ -89,11 +89,10 @@ fn dynamic_function_with_env(config: crate::Config) -> Result<()> {
     let store = config.store();
     let module = get_module(&store)?;

-    #[derive(Clone)]
+    #[derive(WasmerEnv, Clone)]
     struct Env {
         counter: Arc<AtomicUsize>,
     }
-    impl WasmerEnv for Env {}

     impl std::ops::Deref for Env {
         type Target = Arc<AtomicUsize>;
@@ -223,9 +222,8 @@ fn static_function_with_env(config: crate::Config) -> Result<()> {
     let store = config.store();
     let module = get_module(&store)?;

-    #[derive(Clone)]
+    #[derive(WasmerEnv, Clone)]
     struct Env(Arc<AtomicUsize>);
-    impl WasmerEnv for Env {}

     impl std::ops::Deref for Env {
         type Target = Arc<AtomicUsize>;
@@ -324,12 +322,10 @@ fn dynamic_function_with_env_wasmer_env_init_works(config: crate::Config) -> Res
     let module = get_module2(&store)?;

     #[allow(dead_code)]
-    #[derive(Clone)]
+    #[derive(WasmerEnv, Clone)]
     struct Env {
         memory: Memory,
     }
-    impl WasmerEnv for Env {}
-
     let env: Env = Env {
         memory: Memory::new(
             &store,
diff --git a/tests/compilers/issues.rs b/tests/compilers/issues.rs
index dda449faa0..a862c2b6c3 100644
--- a/tests/compilers/issues.rs
+++ b/tests/compilers/issues.rs
@@ -2,13 +2,6 @@
 use anyhow::Result;
 use wasmer::*;

-#[derive(Clone)]
-struct Env {
-    memory: LazyInit<Memory>,
-}
-
-impl WasmerEnv for Env {}
-
 /// Corruption of WasmerEnv when using call indirect.
 ///
 /// Note: this one is specific to Singlepass, but we want to test in all
@@ -19,7 +12,20 @@ impl WasmerEnv for Env {}
 fn issue_2329(mut config: crate::Config) -> Result<()> {
     let store = config.store();

-    fn read_memory(env: &Env, guest_ptr: u32) -> u32 {
+    #[derive(Clone, Default, WasmerEnv)]
+    pub struct Env {
+        memory: LazyInit<Memory>,
+    }
+
+    impl Env {
+        pub fn new() -> Self {
+            Self {
+                memory: LazyInit::new(),
+            }
+        }
+    }
+
+    pub fn read_memory(env: &Env, guest_ptr: u32) -> u32 {
         dbg!(env.memory.get_ref());
         dbg!(guest_ptr);
         0
@@ -55,14 +61,12 @@ fn issue_2329(mut config: crate::Config) -> Result<()> {
         (elem (;0;) (i32.const 1) func $__read_memory))
     "#;
     let module = Module::new(&store, wat)?;
-    let env = Env {
-        memory: LazyInit::new(),
-    };
+    let env = Env::new();
     let imports: ImportObject = imports! {
         "env" => {
             "__read_memory" => Function::new_native_with_env(
                 &store,
-                env,
+                env.clone(),
                 read_memory
             ),
         }
diff --git a/tests/compilers/main.rs b/tests/compilers/main.rs
index 6378ef7bd3..a556dcfe7e 100644
--- a/tests/compilers/main.rs
+++ b/tests/compilers/main.rs
@@ -1,5 +1,6 @@
 //! This test suite does all the tests that involve any compiler
-//! implementation, such as: singlepass.
+//! implementation, such as: singlepass, cranelift or llvm depending
+//! on what's available on the target.
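The recurring change in these test files, shown in isolation (a sketch; assumes the `WasmerEnv` derive macro from the wasmer 2.x API that this branch switches to):

    use std::sync::{atomic::AtomicUsize, Arc};
    use wasmer::WasmerEnv;

    // Before: `#[derive(Clone)]` plus a manual, empty `impl WasmerEnv for Env {}`.
    // After: the derive generates that impl itself.
    #[derive(WasmerEnv, Clone)]
    struct Env {
        counter: Arc<AtomicUsize>,
    }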
 #[macro_use]
 extern crate compiler_test_derive;
diff --git a/tests/compilers/native_functions.rs b/tests/compilers/native_functions.rs
index 44b05f750c..06111f1f7d 100644
--- a/tests/compilers/native_functions.rs
+++ b/tests/compilers/native_functions.rs
@@ -322,9 +322,8 @@ fn static_host_function_with_env(config: crate::Config) -> anyhow::Result<()> {
         Ok((d * 4.0, c * 3.0, b * 2, a * 1))
     }

-    #[derive(Clone)]
+    #[derive(WasmerEnv, Clone)]
     struct Env(Arc<Mutex<i32>>);
-    impl WasmerEnv for Env {}

     impl std::ops::Deref for Env {
         type Target = Arc<Mutex<i32>>;
@@ -397,9 +396,8 @@ fn dynamic_host_function_without_env(config: crate::Config) -> anyhow::Result<()

 fn dynamic_host_function_with_env(config: crate::Config) -> anyhow::Result<()> {
     let store = config.store();

-    #[derive(Clone)]
+    #[derive(WasmerEnv, Clone)]
     struct Env(Arc<Mutex<i32>>);
-    impl WasmerEnv for Env {}

     impl std::ops::Deref for Env {
         type Target = Arc<Mutex<i32>>;
diff --git a/tests/compilers/stack_limiter.rs b/tests/compilers/stack_limiter.rs
index ce1678e85e..5a32cb7aa5 100644
--- a/tests/compilers/stack_limiter.rs
+++ b/tests/compilers/stack_limiter.rs
@@ -141,7 +141,7 @@ fn stack_limit_ok() {
         .lookup_function("main")
         .expect("expected function main");
     let e = main_func.call(&[]);
-    assert!(e.is_ok(), "got stack limit result: {:?}", e);
+    assert!(e.is_ok());
 }

 #[test]
@@ -230,5 +230,5 @@ fn deep_but_sane() {
         .expect("expected function main");
     let e = main_func.call(&[]);

-    assert!(e.is_ok(), "expected successful result was instead {:?}", e);
+    assert!(e.is_ok());
 }
diff --git a/tests/compilers/wast.rs b/tests/compilers/wast.rs
index e8ed631be9..fdae6d97cb 100644
--- a/tests/compilers/wast.rs
+++ b/tests/compilers/wast.rs
@@ -42,6 +42,16 @@ pub fn run_wast(mut config: crate::Config, wast_path: &str) -> anyhow::Result<()
     wast.allow_trap_message("uninitialized element 2", "uninitialized element");
     // `liking.wast` has different wording but the same meaning
     wast.allow_trap_message("out of bounds memory access", "memory out of bounds");
+    if config.compiler == crate::Compiler::Cranelift && config.engine == crate::Engine::Dylib {
+        wast.allow_trap_message("call stack exhausted", "out of bounds memory access");
+        wast.allow_trap_message("indirect call type mismatch", "call stack exhausted");
+        wast.allow_trap_message("integer divide by zero", "call stack exhausted");
+        wast.allow_trap_message("integer overflow", "call stack exhausted");
+        wast.allow_trap_message("invalid conversion to integer", "call stack exhausted");
+        wast.allow_trap_message("undefined element", "call stack exhausted");
+        wast.allow_trap_message("uninitialized element", "call stack exhausted");
+        wast.allow_trap_message("unreachable", "call stack exhausted");
+    }
     if cfg!(feature = "coverage") {
         wast.disable_assert_and_exhaustion();
     }
@@ -56,8 +66,8 @@ pub fn run_wast(mut config: crate::Config, wast_path: &str) -> anyhow::Result<()
     if config.compiler == crate::Compiler::Singlepass {
         // We don't support multivalue yet in singlepass
         wast.allow_instantiation_failures(&[
-            "Validation error: func type returns multiple values but the multi-value feature is not enabled",
-            "Validation error: blocks, loops, and ifs may only produce a resulttype when multi-value is not enabled",
+            "Validation error: invalid result arity: func type returns multiple values",
+            "Validation error: blocks, loops, and ifs accept no parameters when multi-value is not enabled",
         ]);
     }
     wast.fail_fast = false;
diff --git a/tests/ignores.txt b/tests/ignores.txt
index 6caee36512..fd9bebcb7f 100644
--- a/tests/ignores.txt
+++ b/tests/ignores.txt
@@ -2,30 +2,72 @@
 singlepass spec::multi_value # Singlepass has not implemented multivalue (functions that return "structs"/"tuples")
 singlepass spec::simd # Singlepass doesn't support SIMD yet (no one has asked for this feature)
+singlepass+dylib * # Needs PIC support in Singlepass. Not implemented at the moment
+windows+dylib * # This might be trivial to fix?
+musl+dylib * # Dynamic loading not supported in Musl
+
 # Traps
 ## Traps. Tracing doesn't work properly in Singlepass
 ## Unwinding is not properly implemented in Singlepass
 # Needs investigation
 singlepass traps::test_trap_trace
+dylib traps::test_trap_trace
 aarch64 traps::test_trap_trace
 singlepass traps::test_trap_stack_overflow # Need to investigate
+dylib traps::test_trap_stack_overflow # Need to investigate
 aarch64 traps::test_trap_stack_overflow # Need to investigate
 singlepass traps::trap_display_pretty
+llvm traps::trap_display_pretty
+dylib traps::trap_display_pretty
 aarch64 traps::trap_display_pretty
 singlepass traps::trap_display_multi_module
+llvm traps::trap_display_multi_module
+dylib traps::trap_display_multi_module
 aarch64 traps::trap_display_multi_module
 singlepass traps::call_signature_mismatch
+llvm traps::call_signature_mismatch
+dylib traps::call_signature_mismatch
 macos+aarch64 traps::call_signature_mismatch
 singlepass traps::start_trap_pretty
+llvm traps::start_trap_pretty
+dylib traps::start_trap_pretty
 aarch64 traps::start_trap_pretty

+cranelift multi_value_imports::dylib # Needs investigation
 singlepass multi_value_imports::dylib # Singlepass doesn't support multivalue
 singlepass multi_value_imports::dynamic # Singlepass doesn't support multivalue

+# LLVM doesn't fully work on macOS M1
+llvm+universal+macos+aarch64 * # We are using the object crate, which did not fully support aarch64 relocations emitted by LLVM. Needs reassessment
+llvm+dylib+macos+aarch64 * # Tests seem to be failing randomly
+
 # TODO: We need to fix this in ARM. The issue is caused by libunwind overflowing
 # the stack while creating the stacktrace.
 # https://github.com/rust-lang/backtrace-rs/issues/356
+cranelift+aarch64 spec::skip_stack_guard_page # This is skipped for ARM, not fully fixed yet
+llvm+aarch64 spec::skip_stack_guard_page # This is skipped for ARM, not fully fixed yet
 singlepass+windows spec::skip_stack_guard_page # Needs investigation.
+cranelift+windows spec::skip_stack_guard_page # Needs investigation. Issue: `STATUS_ACCESS_VIOLATION` trap happened
+cranelift+macos spec::skip_stack_guard_page # Needs investigation. process didn't exit successfully: (signal: 6, SIGABRT: process abort signal)
+llvm+macos spec::skip_stack_guard_page # Needs investigation. process didn't exit successfully: (signal: 6, SIGABRT: process abort signal)
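For orientation (a reading of the file format inferred from the entries themselves, not from documentation): each line is `<tag>[+<tag>...] <test-pattern> [# comment]`; a test is skipped when every `+`-joined tag matches the current configuration, and `*` as the pattern skips everything for that configuration. For example:

    singlepass+dylib *                        # skip all tests for Singlepass with the Dylib engine
    llvm+aarch64 spec::skip_stack_guard_page  # skip one spec test for LLVM on aarch64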
+
+
+# TODO(https://github.com/wasmerio/wasmer/issues/1727): Traps in dylib engine
+cranelift+dylib spec::linking # Needs investigation
+cranelift+dylib spec::bulk # Needs investigation
+
+# Some SIMD operations are not yet supported by Cranelift
+# Cranelift just added support for most of those recently, so it might be easy to update
+cranelift spec::simd::simd_conversions
+cranelift spec::simd::simd_i16x8_extadd_pairwise_i8x16
+cranelift spec::simd::simd_i16x8_extmul_i8x16
+cranelift spec::simd::simd_i16x8_q15mulr_sat_s
+cranelift spec::simd::simd_i32x4_extadd_pairwise_i16x8
+cranelift spec::simd::simd_i32x4_extmul_i16x8
+cranelift spec::simd::simd_i32x4_trunc_sat_f64x2
+cranelift spec::simd::simd_i64x2_extmul_i32x4
+cranelift spec::simd::simd_i8x16_arith2
+cranelift spec::simd::simd_int_to_int_extend

 # Windows doesn't overcommit and fails to allocate 4GB of memory
 windows wasmer::max_size_of_memory
diff --git a/tests/integration/README.md b/tests/integration/README.md
new file mode 100644
index 0000000000..01ba922aab
--- /dev/null
+++ b/tests/integration/README.md
@@ -0,0 +1,19 @@
+# Wasmer Integration tests
+
+All Wasmer end-to-end integration tests live here.
+We have different kinds of integration tests:
+
+## CLI Integration tests
+
+These tests check that the `wasmer` CLI works as it should when run
+as a command in a shell, for each of the supported compilers.
+
+## C Integration tests
+
+These tests verify that the Wasmer wasm-c-api tests pass for each of the
+supported compilers.
+
+## Rust Integration tests
+
+These tests verify that the `wasmer` API fulfills the requirements of
+external users.
diff --git a/tests/integration/cli/Cargo.toml b/tests/integration/cli/Cargo.toml
new file mode 100644
index 0000000000..be30769156
--- /dev/null
+++ b/tests/integration/cli/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "wasmer-integration-tests-cli"
+version = "2.1.0"
+authors = ["Wasmer Engineering Team <engineering@wasmer.io>"]
+description = "CLI integration tests"
+repository = "https://github.com/wasmerio/wasmer"
+edition = "2018"
+publish = false
+
+[dependencies]
+anyhow = "1"
+tempfile = "3"
diff --git a/tests/integration/cli/src/assets.rs b/tests/integration/cli/src/assets.rs
new file mode 100644
index 0000000000..e4ebf38d05
--- /dev/null
+++ b/tests/integration/cli/src/assets.rs
@@ -0,0 +1,42 @@
+use std::env;
+use std::path::PathBuf;
+
+pub const C_ASSET_PATH: &str = concat!(
+    env!("CARGO_MANIFEST_DIR"),
+    "/../../../lib/c-api/examples/assets"
+);
+pub const ASSET_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/../../../tests/examples");
+
+pub const WASMER_INCLUDE_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/../../../lib/c-api");
+
+#[cfg(feature = "debug")]
+pub const WASMER_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/../../../target/debug/wasmer");
+
+#[cfg(not(feature = "debug"))]
+pub const WASMER_PATH: &str = concat!(
+    env!("CARGO_MANIFEST_DIR"),
+    "/../../../target/release/wasmer"
+);
+
+#[cfg(not(windows))]
+pub const LIBWASMER_PATH: &str = concat!(
+    env!("CARGO_MANIFEST_DIR"),
+    "/../../../target/release/libwasmer.a"
+);
+#[cfg(windows)]
+pub const LIBWASMER_PATH: &str = concat!(
+    env!("CARGO_MANIFEST_DIR"),
+    "/../../../target/release/wasmer.lib"
+);
+
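Usage sketch for the two getters defined just below (the paths here are hypothetical): each environment variable takes precedence over the compiled-in default.

    // WASMER_TEST_WASMER_PATH=/tmp/wasmer overrides target/release/wasmer:
    std::env::set_var("WASMER_TEST_WASMER_PATH", "/tmp/wasmer");
    assert_eq!(get_wasmer_path(), std::path::PathBuf::from("/tmp/wasmer"));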
+/// Get the path to the `libwasmer.a` static library.
+pub fn get_libwasmer_path() -> PathBuf {
+    PathBuf::from(
+        env::var("WASMER_TEST_LIBWASMER_PATH").unwrap_or_else(|_| LIBWASMER_PATH.to_string()),
+    )
+}
+
+/// Get the path to the `wasmer` executable to be used in this test.
+pub fn get_wasmer_path() -> PathBuf {
+    PathBuf::from(env::var("WASMER_TEST_WASMER_PATH").unwrap_or_else(|_| WASMER_PATH.to_string()))
+}
diff --git a/tests/integration/cli/src/lib.rs b/tests/integration/cli/src/lib.rs
new file mode 100644
index 0000000000..43d6ada6a6
--- /dev/null
+++ b/tests/integration/cli/src/lib.rs
@@ -0,0 +1,10 @@
+#![forbid(unsafe_code)]
+
+//! CLI integration tests
+
+pub mod assets;
+pub mod link_code;
+pub mod util;
+
+pub use assets::*;
+pub use util::*;
diff --git a/tests/integration/cli/src/link_code.rs b/tests/integration/cli/src/link_code.rs
new file mode 100644
index 0000000000..0346dbc7d8
--- /dev/null
+++ b/tests/integration/cli/src/link_code.rs
@@ -0,0 +1,73 @@
+use crate::assets::*;
+use anyhow::bail;
+use std::path::PathBuf;
+use std::process::Command;
+
+/// Data used to run a linking command for generated artifacts.
+#[derive(Debug)]
+pub struct LinkCode {
+    /// The directory to operate in.
+    pub current_dir: PathBuf,
+    /// Path to the linker used to run the linking command.
+    pub linker_path: PathBuf,
+    /// String used as an optimization flag.
+    pub optimization_flag: String,
+    /// Paths of objects to link.
+    pub object_paths: Vec<PathBuf>,
+    /// Path to the output target.
+    pub output_path: PathBuf,
+    /// Path to the static libwasmer library.
+    pub libwasmer_path: PathBuf,
+}
+
+impl Default for LinkCode {
+    fn default() -> Self {
+        #[cfg(not(windows))]
+        let linker = "cc";
+        #[cfg(windows)]
+        let linker = "clang";
+        Self {
+            current_dir: std::env::current_dir().unwrap(),
+            linker_path: PathBuf::from(linker),
+            optimization_flag: String::from("-O2"),
+            object_paths: vec![],
+            output_path: PathBuf::from("a.out"),
+            libwasmer_path: get_libwasmer_path(),
+        }
+    }
+}
+
+impl LinkCode {
+    pub fn run(&self) -> anyhow::Result<()> {
+        let mut command = Command::new(&self.linker_path);
+        let command = command
+            .current_dir(&self.current_dir)
+            .arg(&self.optimization_flag)
+            .args(
+                self.object_paths
+                    .iter()
+                    .map(|path| path.canonicalize().unwrap()),
+            )
+            .arg(&self.libwasmer_path.canonicalize()?);
+        #[cfg(windows)]
+        let command = command
+            .arg("-luserenv")
+            .arg("-lWs2_32")
+            .arg("-ladvapi32")
+            .arg("-lbcrypt");
+        #[cfg(not(windows))]
+        let command = command.arg("-ldl").arg("-lm").arg("-pthread");
+        let output = command.arg("-o").arg(&self.output_path).output()?;
+
+        if !output.status.success() {
+            bail!(
+                "linking failed with: stdout: {}\n\nstderr: {}",
+                std::str::from_utf8(&output.stdout)
+                    .expect("stdout is not utf8! need to handle arbitrary bytes"),
+                std::str::from_utf8(&output.stderr)
+                    .expect("stderr is not utf8! need to handle arbitrary bytes")
+            );
+        }
+        Ok(())
+    }
+}
diff --git a/tests/integration/cli/src/util.rs b/tests/integration/cli/src/util.rs
new file mode 100644
index 0000000000..5ec991e3c7
--- /dev/null
+++ b/tests/integration/cli/src/util.rs
@@ -0,0 +1,62 @@
+use anyhow::bail;
+use std::path::Path;
+use std::process::Command;
+
+#[derive(Debug, Copy, Clone)]
+pub enum Compiler {
+    Cranelift,
+    LLVM,
+    Singlepass,
+}
+
+impl Compiler {
+    pub const fn to_flag(self) -> &'static str {
+        match self {
+            Compiler::Cranelift => "--cranelift",
+            Compiler::LLVM => "--llvm",
+            Compiler::Singlepass => "--singlepass",
+        }
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum Engine {
+    Universal,
+    Dylib,
+    Staticlib,
+}
+
+impl Engine {
+    pub const fn to_flag(self) -> &'static str {
+        match self {
+            Engine::Universal => "--universal",
+            Engine::Dylib => "--dylib",
+            Engine::Staticlib => "--staticlib",
+        }
+    }
+}
+
+pub fn run_code(
+    operating_dir: &Path,
+    executable_path: &Path,
+    args: &[String],
+) -> anyhow::Result<String> {
+    let output = Command::new(executable_path.canonicalize()?)
+        .current_dir(operating_dir)
+        .args(args)
+        .output()?;
+
+    if !output.status.success() {
+        bail!(
+            "running executable failed: stdout: {}\n\nstderr: {}",
+            std::str::from_utf8(&output.stdout)
+                .expect("stdout is not utf8! need to handle arbitrary bytes"),
+            std::str::from_utf8(&output.stderr)
+                .expect("stderr is not utf8! need to handle arbitrary bytes")
+        );
+    }
+    let output =
+        std::str::from_utf8(&output.stdout).expect("output from running executable is not utf-8");
+
+    Ok(output.to_owned())
+}
diff --git a/tests/integration/cli/tests/compile.rs b/tests/integration/cli/tests/compile.rs
new file mode 100644
index 0000000000..5032da9bd8
--- /dev/null
+++ b/tests/integration/cli/tests/compile.rs
@@ -0,0 +1,172 @@
+//! CLI tests for the compile subcommand.
+
+use anyhow::{bail, Context};
+use std::fs;
+use std::io::Write;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+use wasmer_integration_tests_cli::link_code::*;
+use wasmer_integration_tests_cli::*;
+
+const STATICLIB_ENGINE_TEST_C_SOURCE: &[u8] = include_bytes!("staticlib_engine_test_c_source.c");
+
+fn staticlib_engine_test_wasm_path() -> String {
+    format!("{}/{}", C_ASSET_PATH, "qjs.wasm")
+}
+
+/// Data used to run the `wasmer compile` command.
+#[derive(Debug)]
+struct WasmerCompile {
+    /// The directory to operate in.
+    current_dir: PathBuf,
+    /// Path to the wasmer executable used to run the command.
+    wasmer_path: PathBuf,
+    /// Path to the Wasm file to compile.
+    wasm_path: PathBuf,
+    /// Path to the static object file produced by compiling the Wasm.
+    wasm_object_path: PathBuf,
+    /// Path to output the generated header to.
+    header_output_path: PathBuf,
+    /// Compiler with which to compile the Wasm.
+    compiler: Compiler,
+    /// Engine with which to generate the artifacts.
+ engine: Engine, +} + +impl Default for WasmerCompile { + fn default() -> Self { + #[cfg(not(windows))] + let wasm_obj_path = "wasm.o"; + #[cfg(windows)] + let wasm_obj_path = "wasm.obj"; + Self { + current_dir: std::env::current_dir().unwrap(), + wasmer_path: get_wasmer_path(), + wasm_path: PathBuf::from(staticlib_engine_test_wasm_path()), + wasm_object_path: PathBuf::from(wasm_obj_path), + header_output_path: PathBuf::from("my_wasm.h"), + compiler: Compiler::Cranelift, + engine: Engine::Staticlib, + } + } +} + +impl WasmerCompile { + fn run(&self) -> anyhow::Result<()> { + let output = Command::new(&self.wasmer_path) + .current_dir(&self.current_dir) + .arg("compile") + .arg(&self.wasm_path.canonicalize()?) + .arg(&self.compiler.to_flag()) + .arg(&self.engine.to_flag()) + .arg("-o") + .arg(&self.wasm_object_path) + .arg("--header") + .arg(&self.header_output_path) + .output()?; + + if !output.status.success() { + bail!( + "wasmer compile failed with: stdout: {}\n\nstderr: {}", + std::str::from_utf8(&output.stdout) + .expect("stdout is not utf8! need to handle arbitrary bytes"), + std::str::from_utf8(&output.stderr) + .expect("stderr is not utf8! need to handle arbitrary bytes") + ); + } + Ok(()) + } +} + +/// Compile the C code. +fn run_c_compile( + current_dir: &Path, + path_to_c_src: &Path, + output_name: &Path, +) -> anyhow::Result<()> { + #[cfg(not(windows))] + let c_compiler = "cc"; + #[cfg(windows)] + let c_compiler = "clang++"; + + let output = Command::new(c_compiler) + .current_dir(current_dir) + .arg("-O2") + .arg("-c") + .arg(path_to_c_src) + .arg("-I") + .arg(WASMER_INCLUDE_PATH) + .arg("-o") + .arg(output_name) + .output()?; + + if !output.status.success() { + bail!( + "C code compile failed with: stdout: {}\n\nstderr: {}", + std::str::from_utf8(&output.stdout) + .expect("stdout is not utf8! need to handle arbitrary bytes"), + std::str::from_utf8(&output.stderr) + .expect("stderr is not utf8! 
need to handle arbitrary bytes")
+        );
+    }
+    Ok(())
+}
+
+#[test]
+fn staticlib_engine_works() -> anyhow::Result<()> {
+    let temp_dir = tempfile::tempdir().context("Making a temp dir")?;
+    let operating_dir: PathBuf = temp_dir.path().to_owned();
+
+    let wasm_path = operating_dir.join(staticlib_engine_test_wasm_path());
+    #[cfg(not(windows))]
+    let wasm_object_path = operating_dir.join("wasm.o");
+    #[cfg(windows)]
+    let wasm_object_path = operating_dir.join("wasm.obj");
+    let header_output_path = operating_dir.join("my_wasm.h");
+
+    WasmerCompile {
+        current_dir: operating_dir.clone(),
+        wasm_path: wasm_path.clone(),
+        wasm_object_path: wasm_object_path.clone(),
+        header_output_path,
+        compiler: Compiler::Cranelift,
+        engine: Engine::Staticlib,
+        ..Default::default()
+    }
+    .run()
+    .context("Failed to compile wasm with Wasmer")?;
+
+    let c_src_file_name = operating_dir.join("c_src.c");
+    #[cfg(not(windows))]
+    let c_object_path = operating_dir.join("c_src.o");
+    #[cfg(windows)]
+    let c_object_path = operating_dir.join("c_src.obj");
+    let executable_path = operating_dir.join("a.out");
+
+    // TODO: adjust C source code based on locations of things
+    {
+        let mut c_src_file = fs::OpenOptions::new()
+            .create_new(true)
+            .write(true)
+            .open(&c_src_file_name)
+            .context("Failed to open C source code file")?;
+        c_src_file.write_all(STATICLIB_ENGINE_TEST_C_SOURCE)?;
+    }
+    run_c_compile(&operating_dir, &c_src_file_name, &c_object_path)
+        .context("Failed to compile C source code")?;
+    LinkCode {
+        current_dir: operating_dir.clone(),
+        object_paths: vec![c_object_path, wasm_object_path],
+        output_path: executable_path.clone(),
+        ..Default::default()
+    }
+    .run()
+    .context("Failed to link objects together")?;
+
+    let result = run_code(&operating_dir, &executable_path, &[])
+        .context("Failed to run generated executable")?;
+    let result_lines = result.lines().collect::<Vec<&str>>();
+    assert_eq!(result_lines, vec!["Initializing...", "\"Hello, World\""],);
+
+    Ok(())
+}
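Recap of the three-step pipeline the test above drives, reusing the helpers from this patch (an illustrative sketch; the paths are hypothetical):

    // 1. Wasm -> object file + C header, via `wasmer compile` with the Staticlib engine:
    WasmerCompile { wasm_path: "qjs.wasm".into(), ..Default::default() }.run()?;
    // 2. C driver + Wasm object -> native executable:
    LinkCode { object_paths: vec!["c_src.o".into(), "wasm.o".into()], ..Default::default() }.run()?;
    // 3. Run the executable and capture its stdout:
    let stdout = run_code(Path::new("."), Path::new("./a.out"), &[])?;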
diff --git a/tests/integration/cli/tests/create_exe.rs b/tests/integration/cli/tests/create_exe.rs
new file mode 100644
index 0000000000..3669aed28f
--- /dev/null
+++ b/tests/integration/cli/tests/create_exe.rs
@@ -0,0 +1,162 @@
+//! Tests of the `wasmer create-exe` command.
+
+use anyhow::{bail, Context};
+use std::fs;
+use std::io::prelude::*;
+use std::path::PathBuf;
+use std::process::Command;
+use wasmer_integration_tests_cli::*;
+
+fn create_exe_test_wasm_path() -> String {
+    format!("{}/{}", C_ASSET_PATH, "qjs.wasm")
+}
+const JS_TEST_SRC_CODE: &[u8] =
+    b"function greet(name) { return JSON.stringify('Hello, ' + name); }; print(greet('World'));\n";
+
+/// Data used to run the `wasmer create-exe` command.
+#[derive(Debug)]
+struct WasmerCreateExe {
+    /// The directory to operate in.
+    current_dir: PathBuf,
+    /// Path to the wasmer executable used to run the command.
+    wasmer_path: PathBuf,
+    /// Path to the Wasm file to compile.
+    wasm_path: PathBuf,
+    /// Path to the native executable produced by compiling the Wasm.
+    native_executable_path: PathBuf,
+    /// Compiler with which to compile the Wasm.
+    compiler: Compiler,
+}
+
+impl Default for WasmerCreateExe {
+    fn default() -> Self {
+        #[cfg(not(windows))]
+        let native_executable_path = PathBuf::from("wasm.out");
+        #[cfg(windows)]
+        let native_executable_path = PathBuf::from("wasm.exe");
+        Self {
+            current_dir: std::env::current_dir().unwrap(),
+            wasmer_path: get_wasmer_path(),
+            wasm_path: PathBuf::from(create_exe_test_wasm_path()),
+            native_executable_path,
+            compiler: Compiler::Cranelift,
+        }
+    }
+}
+
+impl WasmerCreateExe {
+    fn run(&self) -> anyhow::Result<()> {
+        let output = Command::new(&self.wasmer_path)
+            .current_dir(&self.current_dir)
+            .arg("create-exe")
+            .arg(&self.wasm_path.canonicalize()?)
+            .arg(&self.compiler.to_flag())
+            .arg("-o")
+            .arg(&self.native_executable_path)
+            .output()?;
+
+        if !output.status.success() {
+            bail!(
+                "wasmer create-exe failed with: stdout: {}\n\nstderr: {}",
+                std::str::from_utf8(&output.stdout)
+                    .expect("stdout is not utf8! need to handle arbitrary bytes"),
+                std::str::from_utf8(&output.stderr)
+                    .expect("stderr is not utf8! need to handle arbitrary bytes")
+            );
+        }
+        Ok(())
+    }
+}
+
+#[test]
+fn create_exe_works() -> anyhow::Result<()> {
+    let temp_dir = tempfile::tempdir()?;
+    let operating_dir: PathBuf = temp_dir.path().to_owned();
+
+    let wasm_path = operating_dir.join(create_exe_test_wasm_path());
+    #[cfg(not(windows))]
+    let executable_path = operating_dir.join("wasm.out");
+    #[cfg(windows)]
+    let executable_path = operating_dir.join("wasm.exe");
+
+    WasmerCreateExe {
+        current_dir: operating_dir.clone(),
+        wasm_path: wasm_path.clone(),
+        native_executable_path: executable_path.clone(),
+        compiler: Compiler::Cranelift,
+        ..Default::default()
+    }
+    .run()
+    .context("Failed to create-exe wasm with Wasmer")?;
+
+    let result = run_code(
+        &operating_dir,
+        &executable_path,
+        &["--eval".to_string(), "function greet(name) { return JSON.stringify('Hello, ' + name); }; print(greet('World'));".to_string()],
+    )
+    .context("Failed to run generated executable")?;
+    let result_lines = result.lines().collect::<Vec<&str>>();
+    assert_eq!(result_lines, vec!["\"Hello, World\""],);
+
+    Ok(())
+}
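The `--dir` and `--mapdir` arguments exercised in the next test are passed to the generated executable itself, which (as these tests suggest) forwards them to its embedded WASI layer as preopened-directory configuration. A hypothetical invocation through the helper:

    // Grant the guest access to the current directory, then run a script from it:
    let out = run_code(
        Path::new("."),
        Path::new("./wasm.out"),
        &["--dir=.".to_string(), "--script".to_string(), "test.js".to_string()],
    )?;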
+
+#[test]
+fn create_exe_works_with_file() -> anyhow::Result<()> {
+    let temp_dir = tempfile::tempdir()?;
+    let operating_dir: PathBuf = temp_dir.path().to_owned();
+
+    let wasm_path = operating_dir.join(create_exe_test_wasm_path());
+    #[cfg(not(windows))]
+    let executable_path = operating_dir.join("wasm.out");
+    #[cfg(windows)]
+    let executable_path = operating_dir.join("wasm.exe");
+
+    WasmerCreateExe {
+        current_dir: operating_dir.clone(),
+        wasm_path: wasm_path.clone(),
+        native_executable_path: executable_path.clone(),
+        compiler: Compiler::Cranelift,
+        ..Default::default()
+    }
+    .run()
+    .context("Failed to create-exe wasm with Wasmer")?;
+
+    {
+        let mut f = fs::OpenOptions::new()
+            .write(true)
+            .create_new(true)
+            .open(operating_dir.join("test.js"))?;
+        f.write_all(JS_TEST_SRC_CODE)?;
+    }
+
+    // test with `--dir`
+    let result = run_code(
+        &operating_dir,
+        &executable_path,
+        &[
+            "--dir=.".to_string(),
+            "--script".to_string(),
+            "test.js".to_string(),
+        ],
+    )
+    .context("Failed to run generated executable")?;
+    let result_lines = result.lines().collect::<Vec<&str>>();
+    assert_eq!(result_lines, vec!["\"Hello, World\""],);
+
+    // test with `--mapdir`
+    let result = run_code(
+        &operating_dir,
+        &executable_path,
+        &[
+            "--mapdir=abc:.".to_string(),
+            "--script".to_string(),
+            "abc/test.js".to_string(),
+        ],
+    )
+    .context("Failed to run generated executable")?;
+    let result_lines = result.lines().collect::<Vec<&str>>();
+    assert_eq!(result_lines, vec!["\"Hello, World\""],);
+
+    Ok(())
+}
diff --git a/tests/integration/cli/tests/run.rs b/tests/integration/cli/tests/run.rs
new file mode 100644
index 0000000000..07fddc8656
--- /dev/null
+++ b/tests/integration/cli/tests/run.rs
@@ -0,0 +1,77 @@
+//! Basic tests for the `run` subcommand
+
+use anyhow::bail;
+use std::process::Command;
+use wasmer_integration_tests_cli::{ASSET_PATH, C_ASSET_PATH, WASMER_PATH};
+
+fn wasi_test_wasm_path() -> String {
+    format!("{}/{}", C_ASSET_PATH, "qjs.wasm")
+}
+
+fn test_no_imports_wat_path() -> String {
+    format!("{}/{}", ASSET_PATH, "fib.wat")
+}
+
+fn test_no_start_wat_path() -> String {
+    format!("{}/{}", ASSET_PATH, "no_start.wat")
+}
+
+#[test]
+fn run_wasi_works() -> anyhow::Result<()> {
+    let output = Command::new(WASMER_PATH)
+        .arg("run")
+        .arg(wasi_test_wasm_path())
+        .arg("--")
+        .arg("-e")
+        .arg("print(3 * (4 + 5))")
+        .output()?;
+
+    if !output.status.success() {
+        bail!(
+            "`wasmer run` failed with: stdout: {}\n\nstderr: {}",
+            std::str::from_utf8(&output.stdout)
+                .expect("stdout is not utf8! need to handle arbitrary bytes"),
+            std::str::from_utf8(&output.stderr)
+                .expect("stderr is not utf8! need to handle arbitrary bytes")
+        );
+    }
+
+    let stdout_output = std::str::from_utf8(&output.stdout).unwrap();
+    assert_eq!(stdout_output, "27\n");
+
+    Ok(())
+}
+
+#[test]
+fn run_no_imports_wasm_works() -> anyhow::Result<()> {
+    let output = Command::new(WASMER_PATH)
+        .arg("run")
+        .arg(test_no_imports_wat_path())
+        .output()?;
+
+    if !output.status.success() {
+        bail!(
+            "`wasmer run` failed with: stdout: {}\n\nstderr: {}",
+            std::str::from_utf8(&output.stdout)
+                .expect("stdout is not utf8! need to handle arbitrary bytes"),
+            std::str::from_utf8(&output.stderr)
+                .expect("stderr is not utf8! need to handle arbitrary bytes")
+        );
+    }
+
+    Ok(())
+}
+
+#[test]
+fn run_no_start_wasm_report_error() -> anyhow::Result<()> {
+    let output = Command::new(WASMER_PATH)
+        .arg("run")
+        .arg(test_no_start_wat_path())
+        .output()?;
+
+    assert!(!output.status.success());
+    let result = std::str::from_utf8(&output.stderr).unwrap().to_string();
+    assert!(result.contains("Can not find any export functions."));
+    Ok(())
+}
diff --git a/tests/integration/cli/tests/staticlib_engine_test_c_source.c b/tests/integration/cli/tests/staticlib_engine_test_c_source.c
new file mode 100644
index 0000000000..68a79f1a49
--- /dev/null
+++ b/tests/integration/cli/tests/staticlib_engine_test_c_source.c
@@ -0,0 +1,100 @@
+#include "wasmer.h"
+#include "my_wasm.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define own
+
+static void print_wasmer_error() {
+  int error_len = wasmer_last_error_length();
+  printf("Error len: `%d`\n", error_len);
+  char *error_str = (char *)malloc(error_len);
+  wasmer_last_error_message(error_str, error_len);
+  printf("Error str: `%s`\n", error_str);
+  free(error_str);
+}
+
+int main() {
+  printf("Initializing...\n");
+  wasm_config_t *config = wasm_config_new();
+  wasm_config_set_engine(config, STATICLIB);
+  wasm_engine_t *engine = wasm_engine_new_with_config(config);
+  wasm_store_t *store = wasm_store_new(engine);
+
+  wasm_module_t *module = wasmer_staticlib_engine_new(store, "qjs.wasm");
+
+  if (!module) {
+    printf("Failed to create module\n");
+    print_wasmer_error();
+    return -1;
+  }
+
+  // We have now finished the memory buffer bookkeeping and we have a valid
+  // Module.
+ + // In this example we're passing some JavaScript source code as a command line + // argument to a WASI module that can evaluate JavaScript. + wasi_config_t *wasi_config = wasi_config_new("constant_value_here"); + const char *js_string = + "function greet(name) { return JSON.stringify('Hello, ' + name); }; " + "print(greet('World'));"; + wasi_config_arg(wasi_config, "--eval"); + wasi_config_arg(wasi_config, js_string); + wasi_env_t *wasi_env = wasi_env_new(wasi_config); + + if (!wasi_env) { + printf("> Error building WASI env!\n"); + print_wasmer_error(); + return 1; + } + + wasm_importtype_vec_t import_types; + wasm_module_imports(module, &import_types); + + wasm_extern_vec_t imports; + wasm_extern_vec_new_uninitialized(&imports, import_types.size); + wasm_importtype_vec_delete(&import_types); + + bool get_imports_result = wasi_get_imports(store, module, wasi_env, &imports); + wasi_env_delete(wasi_env); + + if (!get_imports_result) { + printf("> Error getting WASI imports!\n"); + print_wasmer_error(); + return 1; + } + + wasm_instance_t *instance = wasm_instance_new(store, module, &imports, NULL); + + if (!instance) { + printf("Failed to create instance\n"); + print_wasmer_error(); + return -1; + } + + // WASI is now set up. + own wasm_func_t *start_function = wasi_get_start_function(instance); + if (!start_function) { + fprintf(stderr, "`_start` function not found\n"); + print_wasmer_error(); + return -1; + } + + fflush(stdout); + + wasm_val_vec_t args = WASM_EMPTY_VEC; + wasm_val_vec_t results = WASM_EMPTY_VEC; + own wasm_trap_t *trap = wasm_func_call(start_function, &args, &results); + if (trap) { + fprintf(stderr, "Trap is not NULL: TODO:\n"); + return -1; + } + + wasm_instance_delete(instance); + wasm_module_delete(module); + wasm_store_delete(store); + wasm_engine_delete(engine); + + return 0; +} diff --git a/tests/integration/cli/tests/version.rs b/tests/integration/cli/tests/version.rs new file mode 100644 index 0000000000..543237c23f --- /dev/null +++ b/tests/integration/cli/tests/version.rs @@ -0,0 +1,62 @@ +use anyhow::bail; +use std::process::Command; +use wasmer_integration_tests_cli::WASMER_PATH; + +const WASMER_VERSION: &str = env!("CARGO_PKG_VERSION"); + +#[test] +fn version_string_is_correct() -> anyhow::Result<()> { + let expected_version_output = format!("wasmer {}\n", WASMER_VERSION); + + let outputs = [ + Command::new(WASMER_PATH).arg("--version").output()?, + Command::new(WASMER_PATH).arg("-V").output()?, + ]; + + for output in &outputs { + if !output.status.success() { + bail!( + "version failed with: stdout: {}\n\nstderr: {}", + std::str::from_utf8(&output.stdout) + .expect("stdout is not utf8! need to handle arbitrary bytes"), + std::str::from_utf8(&output.stderr) + .expect("stderr is not utf8! need to handle arbitrary bytes") + ); + } + + let stdout_output = std::str::from_utf8(&output.stdout).unwrap(); + assert_eq!(stdout_output, &expected_version_output); + } + + Ok(()) +} + +#[test] +fn help_text_contains_version() -> anyhow::Result<()> { + let expected_version_output = format!("wasmer {}", WASMER_VERSION); + + let outputs = [ + Command::new(WASMER_PATH).arg("--help").output()?, + Command::new(WASMER_PATH).arg("-h").output()?, + ]; + + for output in &outputs { + if !output.status.success() { + bail!( + "version failed with: stdout: {}\n\nstderr: {}", + std::str::from_utf8(&output.stdout) + .expect("stdout is not utf8! need to handle arbitrary bytes"), + std::str::from_utf8(&output.stderr) + .expect("stderr is not utf8! 
need to handle arbitrary bytes")
+            );
+        }
+
+        let stdout_output = std::str::from_utf8(&output.stdout).unwrap();
+        assert_eq!(
+            stdout_output.lines().next().unwrap(),
+            &expected_version_output
+        );
+    }
+
+    Ok(())
+}
diff --git a/tests/integration/ios/.gitignore b/tests/integration/ios/.gitignore
new file mode 100644
index 0000000000..55e8065dea
--- /dev/null
+++ b/tests/integration/ios/.gitignore
@@ -0,0 +1,37 @@
+### Xcode ###
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## User settings
+xcuserdata/
+
+## compatibility with Xcode 8 and earlier (ignoring not required starting Xcode 9)
+*.xcscmblueprint
+*.xccheckout
+
+## compatibility with Xcode 3 and earlier (ignoring not required starting Xcode 4)
+build/
+DerivedData/
+*.moved-aside
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+
+## Gcc Patch
+/*.gcno
+
+### Xcode Patch ###
+*.xcodeproj/*
+!*.xcodeproj/project.pbxproj
+!*.xcodeproj/xcshareddata/
+!*.xcworkspace/contents.xcworkspacedata
+**/xcshareddata/WorkspaceSettings.xcsettings
+
+# Artifacts from iOS tests
+**/*.dylib
diff --git a/tests/integration/ios/Cargo.toml b/tests/integration/ios/Cargo.toml
new file mode 100644
index 0000000000..944f4216bc
--- /dev/null
+++ b/tests/integration/ios/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "wasmer-integration-tests-ios"
+version = "2.1.0"
+authors = ["Wasmer Engineering Team <engineering@wasmer.io>"]
+description = "iOS integration tests"
+repository = "https://github.com/wasmerio/wasmer"
+edition = "2018"
+publish = false
diff --git a/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.pbxproj b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.pbxproj
new file mode 100644
index 0000000000..21a1535009
--- /dev/null
+++ b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.pbxproj
@@ -0,0 +1,604 @@
+// !$*UTF8*$!
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 54; + objects = { + +/* Begin PBXBuildFile section */ + 6315FBEE26CC45F10059CE47 /* calc.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6315FBEC26CC45F10059CE47 /* calc.cpp */; }; + 63CD62C426C9492100424C7A /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63CD62C326C9492100424C7A /* AppDelegate.swift */; }; + 63CD62C626C9492100424C7A /* SceneDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63CD62C526C9492100424C7A /* SceneDelegate.swift */; }; + 63CD62C826C9492100424C7A /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63CD62C726C9492100424C7A /* ViewController.swift */; }; + 63CD62CB26C9492100424C7A /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 63CD62C926C9492100424C7A /* Main.storyboard */; }; + 63CD62CD26C9492400424C7A /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 63CD62CC26C9492400424C7A /* Assets.xcassets */; }; + 63CD62D026C9492400424C7A /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 63CD62CE26C9492400424C7A /* LaunchScreen.storyboard */; }; + 63CD62DB26C9492400424C7A /* DylibExampleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63CD62DA26C9492400424C7A /* DylibExampleTests.swift */; }; + 63CD62FB26C94F4700424C7A /* libwasmer.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 63CD62F826C94EB600424C7A /* libwasmer.a */; }; + 63CD62FF26C952CC00424C7A /* sum.wasm in Resources */ = {isa = PBXBuildFile; fileRef = 63CD62FE26C952CC00424C7A /* sum.wasm */; }; + 63CD630126C9541E00424C7A /* sum.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 63CD630026C9541E00424C7A /* sum.dylib */; }; + 63CD630226C9541E00424C7A /* sum.dylib in Embed .dylib in App bundle */ = {isa = PBXBuildFile; fileRef = 63CD630026C9541E00424C7A /* sum.dylib */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 63CD62D726C9492400424C7A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 63CD62B826C9492100424C7A /* Project object */; + proxyType = 1; + remoteGlobalIDString = 63CD62BF26C9492100424C7A; + remoteInfo = DylibExample; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXCopyFilesBuildPhase section */ + 63CD630326C9541E00424C7A /* Embed .dylib in App bundle */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = ""; + dstSubfolderSpec = 6; + files = ( + 63CD630226C9541E00424C7A /* sum.dylib in Embed .dylib in App bundle */, + ); + name = "Embed .dylib in App bundle"; + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 6311890B26CBAD2C007746B2 /* sum.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = sum.dylib; path = DylibExample/sum.dylib; sourceTree = ""; }; + 6315FBEC26CC45F10059CE47 /* calc.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = calc.cpp; sourceTree = ""; }; + 6315FBED26CC45F10059CE47 /* calc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = calc.h; sourceTree = ""; }; + 63CD62C026C9492100424C7A /* DylibExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = DylibExample.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 63CD62C326C9492100424C7A /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
AppDelegate.swift; sourceTree = ""; }; + 63CD62C526C9492100424C7A /* SceneDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SceneDelegate.swift; sourceTree = ""; }; + 63CD62C726C9492100424C7A /* ViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewController.swift; sourceTree = ""; }; + 63CD62CA26C9492100424C7A /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; + 63CD62CC26C9492400424C7A /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 63CD62CF26C9492400424C7A /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; + 63CD62D126C9492400424C7A /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 63CD62D626C9492400424C7A /* DylibExampleTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = DylibExampleTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 63CD62DA26C9492400424C7A /* DylibExampleTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DylibExampleTests.swift; sourceTree = ""; }; + 63CD62DC26C9492400424C7A /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 63CD62F426C94D6000424C7A /* DylibExample-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "DylibExample-Bridging-Header.h"; sourceTree = ""; }; + 63CD62F826C94EB600424C7A /* libwasmer.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libwasmer.a; path = ../../../../target/universal/release/libwasmer.a; sourceTree = ""; }; + 63CD62FE26C952CC00424C7A /* sum.wasm */ = {isa = PBXFileReference; lastKnownFileType = text; path = sum.wasm; sourceTree = ""; }; + 63CD630026C9541E00424C7A /* sum.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = sum.dylib; path = DylibExample/sum.dylib; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 63CD62BD26C9492100424C7A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 63CD630126C9541E00424C7A /* sum.dylib in Frameworks */, + 63CD62FB26C94F4700424C7A /* libwasmer.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 63CD62D326C9492400424C7A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 63CD62B726C9492100424C7A = { + isa = PBXGroup; + children = ( + 6311890B26CBAD2C007746B2 /* sum.dylib */, + 63CD62F826C94EB600424C7A /* libwasmer.a */, + 63CD62C226C9492100424C7A /* DylibExample */, + 63CD62D926C9492400424C7A /* DylibExampleTests */, + 63CD62C126C9492100424C7A /* Products */, + 63CD62FA26C94F4700424C7A /* Frameworks */, + ); + sourceTree = ""; + }; + 63CD62C126C9492100424C7A /* Products */ = { + isa = PBXGroup; + children = ( + 63CD62C026C9492100424C7A /* DylibExample.app */, + 63CD62D626C9492400424C7A /* DylibExampleTests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + 63CD62C226C9492100424C7A /* DylibExample */ = { + 
isa = PBXGroup; + children = ( + 63CD62C326C9492100424C7A /* AppDelegate.swift */, + 63CD62C526C9492100424C7A /* SceneDelegate.swift */, + 63CD62C726C9492100424C7A /* ViewController.swift */, + 63CD62C926C9492100424C7A /* Main.storyboard */, + 63CD62CC26C9492400424C7A /* Assets.xcassets */, + 63CD62CE26C9492400424C7A /* LaunchScreen.storyboard */, + 63CD62D126C9492400424C7A /* Info.plist */, + 63CD62F426C94D6000424C7A /* DylibExample-Bridging-Header.h */, + 63CD62FE26C952CC00424C7A /* sum.wasm */, + 6315FBEC26CC45F10059CE47 /* calc.cpp */, + 6315FBED26CC45F10059CE47 /* calc.h */, + ); + path = DylibExample; + sourceTree = ""; + }; + 63CD62D926C9492400424C7A /* DylibExampleTests */ = { + isa = PBXGroup; + children = ( + 63CD62DA26C9492400424C7A /* DylibExampleTests.swift */, + 63CD62DC26C9492400424C7A /* Info.plist */, + ); + path = DylibExampleTests; + sourceTree = ""; + }; + 63CD62FA26C94F4700424C7A /* Frameworks */ = { + isa = PBXGroup; + children = ( + 63CD630026C9541E00424C7A /* sum.dylib */, + ); + name = Frameworks; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 63CD62BF26C9492100424C7A /* DylibExample */ = { + isa = PBXNativeTarget; + buildConfigurationList = 63CD62EA26C9492400424C7A /* Build configuration list for PBXNativeTarget "DylibExample" */; + buildPhases = ( + 63CD62F326C94B1D00424C7A /* Build Headless Wasmer */, + 63CD62FD26C951EF00424C7A /* Compile .wasm to .dylib */, + 63CD62BC26C9492100424C7A /* Sources */, + 63CD62BD26C9492100424C7A /* Frameworks */, + 63CD62BE26C9492100424C7A /* Resources */, + 63CD630326C9541E00424C7A /* Embed .dylib in App bundle */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = DylibExample; + productName = DylibExample; + productReference = 63CD62C026C9492100424C7A /* DylibExample.app */; + productType = "com.apple.product-type.application"; + }; + 63CD62D526C9492400424C7A /* DylibExampleTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 63CD62ED26C9492400424C7A /* Build configuration list for PBXNativeTarget "DylibExampleTests" */; + buildPhases = ( + 63CD62D226C9492400424C7A /* Sources */, + 63CD62D326C9492400424C7A /* Frameworks */, + 63CD62D426C9492400424C7A /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + 63CD62D826C9492400424C7A /* PBXTargetDependency */, + ); + name = DylibExampleTests; + productName = DylibExampleTests; + productReference = 63CD62D626C9492400424C7A /* DylibExampleTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 63CD62B826C9492100424C7A /* Project object */ = { + isa = PBXProject; + attributes = { + LastSwiftUpdateCheck = 1250; + LastUpgradeCheck = 1250; + TargetAttributes = { + 63CD62BF26C9492100424C7A = { + CreatedOnToolsVersion = 12.5; + LastSwiftMigration = 1250; + }; + 63CD62D526C9492400424C7A = { + CreatedOnToolsVersion = 12.5; + TestTargetID = 63CD62BF26C9492100424C7A; + }; + }; + }; + buildConfigurationList = 63CD62BB26C9492100424C7A /* Build configuration list for PBXProject "DylibExample" */; + compatibilityVersion = "Xcode 9.3"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 63CD62B726C9492100424C7A; + productRefGroup = 63CD62C126C9492100424C7A /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 63CD62BF26C9492100424C7A /* DylibExample */, + 63CD62D526C9492400424C7A /* DylibExampleTests */, + ); + }; +/* End PBXProject section */ + +/* Begin 
PBXResourcesBuildPhase section */ + 63CD62BE26C9492100424C7A /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 63CD62D026C9492400424C7A /* LaunchScreen.storyboard in Resources */, + 63CD62CD26C9492400424C7A /* Assets.xcassets in Resources */, + 63CD62CB26C9492100424C7A /* Main.storyboard in Resources */, + 63CD62FF26C952CC00424C7A /* sum.wasm in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 63CD62D426C9492400424C7A /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 63CD62F326C94B1D00424C7A /* Build Headless Wasmer */ = { + isa = PBXShellScriptBuildPhase; + alwaysOutOfDate = 1; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + name = "Build Headless Wasmer"; + outputFileListPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "#!/bin/bash\nexport PATH=$(bash -l -c 'echo $PATH')\ncd ../../../../\nmake build-wasmer\nmake build-capi-headless-ios\n"; + }; + 63CD62FD26C951EF00424C7A /* Compile .wasm to .dylib */ = { + isa = PBXShellScriptBuildPhase; + alwaysOutOfDate = 1; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + name = "Compile .wasm to .dylib"; + outputFileListPaths = ( + ); + outputPaths = ( + "$(DERIVED_FILE_DIR)/sum.dylib", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "export ARCH='x86_64-apple-ios'\nif [[ `uname -m` == 'arm64' ]]; then\n export ARCH='aarch64-apple-ios-sim'\nfi\n\n\n./../../../../target/release/wasmer compile DylibExample/sum.wasm --target $ARCH --dylib -o DylibExample/sum.dylib\n"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 63CD62BC26C9492100424C7A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 63CD62C826C9492100424C7A /* ViewController.swift in Sources */, + 6315FBEE26CC45F10059CE47 /* calc.cpp in Sources */, + 63CD62C426C9492100424C7A /* AppDelegate.swift in Sources */, + 63CD62C626C9492100424C7A /* SceneDelegate.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 63CD62D226C9492400424C7A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 63CD62DB26C9492400424C7A /* DylibExampleTests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 63CD62D826C9492400424C7A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 63CD62BF26C9492100424C7A /* DylibExample */; + targetProxy = 63CD62D726C9492400424C7A /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + 63CD62C926C9492100424C7A /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 63CD62CA26C9492100424C7A /* Base */, + ); + name = Main.storyboard; + sourceTree = ""; + }; + 63CD62CE26C9492400424C7A /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 63CD62CF26C9492400424C7A /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 63CD62E826C9492400424C7A /* Debug */ = { + isa = 
XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 14.5; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + 63CD62E926C9492400424C7A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + 
GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 14.5; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + SDKROOT = iphoneos; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 63CD62EB26C9492400424C7A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_IDENTITY = "Apple Development"; + CODE_SIGN_STYLE = Automatic; + DEVELOPMENT_TEAM = ""; + ENABLE_BITCODE = NO; + EXCLUDED_ARCHS = ""; + "EXCLUDED_ARCHS[sdk=iphonesimulator*]" = arm64; + HEADER_SEARCH_PATHS = "../../../../lib/c-api/"; + INFOPLIST_FILE = DylibExample/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = ( + "../../../../target/universal/release/**", + "$(PROJECT_DIR)/DylibExample", + ); + NEW_SETTING = ""; + PATH = "/Users/nathanhorrigan/.wasmer/bin:/Users/nathanhorrigan/.cargo/bin:/Users/nathanhorrigan/Android/Sdk/tools:/Users/nathanhorrigan/Android/Sdk/tools/bin:/Users/nathanhorrigan/Android/Sdk/platform-tools:/Users/nathanhorrigan/.pyenv/shims:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/Apple/usr/bin:/Users/nathanhorrigan/.wasmer/globals/wapm_packages/.bin"; + PRODUCT_BUNDLE_IDENTIFIER = io.wasmer.DylibExample; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_OBJC_BRIDGING_HEADER = "DylibExample/DylibExample-Bridging-Header.h"; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = 1; + }; + name = Debug; + }; + 63CD62EC26C9492400424C7A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_IDENTITY = "Apple Development"; + CODE_SIGN_STYLE = Automatic; + DEVELOPMENT_TEAM = ""; + ENABLE_BITCODE = NO; + "EXCLUDED_ARCHS[sdk=iphonesimulator*]" = arm64; + HEADER_SEARCH_PATHS = "../../../../lib/c-api/"; + INFOPLIST_FILE = DylibExample/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = ( + "../../../../target/universal/release/**", + "$(PROJECT_DIR)/DylibExample", + ); + NEW_SETTING = ""; + PATH = "/Users/nathanhorrigan/.wasmer/bin:/Users/nathanhorrigan/.cargo/bin:/Users/nathanhorrigan/Android/Sdk/tools:/Users/nathanhorrigan/Android/Sdk/tools/bin:/Users/nathanhorrigan/Android/Sdk/platform-tools:/Users/nathanhorrigan/.pyenv/shims:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/Apple/usr/bin:/Users/nathanhorrigan/.wasmer/globals/wapm_packages/.bin"; + PRODUCT_BUNDLE_IDENTIFIER = io.wasmer.DylibExample; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_OBJC_BRIDGING_HEADER = "DylibExample/DylibExample-Bridging-Header.h"; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = 1; + }; + name = Release; + }; + 63CD62EE26C9492400424C7A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; + BUNDLE_LOADER = "$(TEST_HOST)"; + CODE_SIGN_STYLE = Manual; + DEVELOPMENT_TEAM = ""; + 
HEADER_SEARCH_PATHS = "../../../../lib/c-api/**"; + INFOPLIST_FILE = DylibExampleTests/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 14.5; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = "../../../../target/universal/release/**"; + PRODUCT_BUNDLE_IDENTIFIER = io.wasmer.DylibExampleTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE_SPECIFIER = ""; + "PROVISIONING_PROFILE_SPECIFIER[sdk=macosx*]" = ""; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/DylibExample.app/DylibExample"; + }; + name = Debug; + }; + 63CD62EF26C9492400424C7A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; + BUNDLE_LOADER = "$(TEST_HOST)"; + CODE_SIGN_STYLE = Manual; + DEVELOPMENT_TEAM = ""; + HEADER_SEARCH_PATHS = "../../../../lib/c-api/**"; + INFOPLIST_FILE = DylibExampleTests/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 14.5; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = "../../../../target/universal/release/**"; + PRODUCT_BUNDLE_IDENTIFIER = io.wasmer.DylibExampleTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE_SPECIFIER = ""; + "PROVISIONING_PROFILE_SPECIFIER[sdk=macosx*]" = ""; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/DylibExample.app/DylibExample"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 63CD62BB26C9492100424C7A /* Build configuration list for PBXProject "DylibExample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 63CD62E826C9492400424C7A /* Debug */, + 63CD62E926C9492400424C7A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 63CD62EA26C9492400424C7A /* Build configuration list for PBXNativeTarget "DylibExample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 63CD62EB26C9492400424C7A /* Debug */, + 63CD62EC26C9492400424C7A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 63CD62ED26C9492400424C7A /* Build configuration list for PBXNativeTarget "DylibExampleTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 63CD62EE26C9492400424C7A /* Debug */, + 63CD62EF26C9492400424C7A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 63CD62B826C9492100424C7A /* Project object */; +} diff --git a/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000000..919434a625 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 0000000000..18d981003d --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ 
-0,0 +1,8 @@ + + + + + IDEDidComputeMac32BitWarning + + + diff --git a/tests/integration/ios/DylibExample/DylibExample.xcodeproj/xcshareddata/xcschemes/DylibExample.xcscheme b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/xcshareddata/xcschemes/DylibExample.xcscheme new file mode 100644 index 0000000000..6cddf2ccde --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample.xcodeproj/xcshareddata/xcschemes/DylibExample.xcscheme @@ -0,0 +1,98 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/integration/ios/DylibExample/DylibExample/AppDelegate.swift b/tests/integration/ios/DylibExample/DylibExample/AppDelegate.swift new file mode 100644 index 0000000000..a2a52654bb --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/AppDelegate.swift @@ -0,0 +1,36 @@ +// +// AppDelegate.swift +// DylibExample +// +// Created by Nathan Horrigan on 15/08/2021. +// + +import UIKit + +@main +class AppDelegate: UIResponder, UIApplicationDelegate { + + + + func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool { + // Override point for customization after application launch. + return true + } + + // MARK: UISceneSession Lifecycle + + func application(_ application: UIApplication, configurationForConnecting connectingSceneSession: UISceneSession, options: UIScene.ConnectionOptions) -> UISceneConfiguration { + // Called when a new scene session is being created. + // Use this method to select a configuration to create the new scene with. + return UISceneConfiguration(name: "Default Configuration", sessionRole: connectingSceneSession.role) + } + + func application(_ application: UIApplication, didDiscardSceneSessions sceneSessions: Set) { + // Called when the user discards a scene session. + // If any sessions were discarded while the application was not running, this will be called shortly after application:didFinishLaunchingWithOptions. + // Use this method to release any resources that were specific to the discarded scenes, as they will not return. 
+ } + + +} + diff --git a/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/AccentColor.colorset/Contents.json b/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/AccentColor.colorset/Contents.json new file mode 100644 index 0000000000..eb87897008 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/AccentColor.colorset/Contents.json @@ -0,0 +1,11 @@ +{ + "colors" : [ + { + "idiom" : "universal" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/AppIcon.appiconset/Contents.json b/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000000..9221b9bb1a --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,98 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "20x20" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "20x20" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "29x29" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "29x29" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "40x40" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "40x40" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "60x60" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "60x60" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "20x20" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "20x20" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "29x29" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "29x29" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "40x40" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "40x40" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "76x76" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "76x76" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "83.5x83.5" + }, + { + "idiom" : "ios-marketing", + "scale" : "1x", + "size" : "1024x1024" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/Contents.json b/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/Contents.json new file mode 100644 index 0000000000..73c00596a7 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/tests/integration/ios/DylibExample/DylibExample/Base.lproj/LaunchScreen.storyboard b/tests/integration/ios/DylibExample/DylibExample/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 0000000000..865e9329f3 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/integration/ios/DylibExample/DylibExample/Base.lproj/Main.storyboard b/tests/integration/ios/DylibExample/DylibExample/Base.lproj/Main.storyboard new file mode 100644 index 0000000000..46b74d95b3 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/Base.lproj/Main.storyboard @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/integration/ios/DylibExample/DylibExample/DylibExample-Bridging-Header.h 
b/tests/integration/ios/DylibExample/DylibExample/DylibExample-Bridging-Header.h new file mode 100644 index 0000000000..7ce8fb504f --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/DylibExample-Bridging-Header.h @@ -0,0 +1,4 @@ +#include "wasm.h" +#include "wasmer.h" +#include "wasmer_wasm.h" +#include "calc.h" diff --git a/tests/integration/ios/DylibExample/DylibExample/Info.plist b/tests/integration/ios/DylibExample/DylibExample/Info.plist new file mode 100644 index 0000000000..5b531f7b27 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/Info.plist @@ -0,0 +1,66 @@ + + + + + CFBundleDevelopmentRegion + $(DEVELOPMENT_LANGUAGE) + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + $(PRODUCT_BUNDLE_PACKAGE_TYPE) + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + LSRequiresIPhoneOS + + UIApplicationSceneManifest + + UIApplicationSupportsMultipleScenes + + UISceneConfigurations + + UIWindowSceneSessionRoleApplication + + + UISceneConfigurationName + Default Configuration + UISceneDelegateClassName + $(PRODUCT_MODULE_NAME).SceneDelegate + UISceneStoryboardFile + Main + + + + + UIApplicationSupportsIndirectInputEvents + + UILaunchStoryboardName + LaunchScreen + UIMainStoryboardFile + Main + UIRequiredDeviceCapabilities + + armv7 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + + diff --git a/tests/integration/ios/DylibExample/DylibExample/SceneDelegate.swift b/tests/integration/ios/DylibExample/DylibExample/SceneDelegate.swift new file mode 100644 index 0000000000..2ca113a352 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/SceneDelegate.swift @@ -0,0 +1,52 @@ +// +// SceneDelegate.swift +// DylibExample +// +// Created by Nathan Horrigan on 15/08/2021. +// + +import UIKit + +class SceneDelegate: UIResponder, UIWindowSceneDelegate { + + var window: UIWindow? + + + func scene(_ scene: UIScene, willConnectTo session: UISceneSession, options connectionOptions: UIScene.ConnectionOptions) { + // Use this method to optionally configure and attach the UIWindow `window` to the provided UIWindowScene `scene`. + // If using a storyboard, the `window` property will automatically be initialized and attached to the scene. + // This delegate does not imply the connecting scene or session are new (see `application:configurationForConnectingSceneSession` instead). + guard let _ = (scene as? UIWindowScene) else { return } + } + + func sceneDidDisconnect(_ scene: UIScene) { + // Called as the scene is being released by the system. + // This occurs shortly after the scene enters the background, or when its session is discarded. + // Release any resources associated with this scene that can be re-created the next time the scene connects. + // The scene may re-connect later, as its session was not necessarily discarded (see `application:didDiscardSceneSessions` instead). + } + + func sceneDidBecomeActive(_ scene: UIScene) { + // Called when the scene has moved from an inactive state to an active state. + // Use this method to restart any tasks that were paused (or not yet started) when the scene was inactive. 
+ } + + func sceneWillResignActive(_ scene: UIScene) { + // Called when the scene will move from an active state to an inactive state. + // This may occur due to temporary interruptions (ex. an incoming phone call). + } + + func sceneWillEnterForeground(_ scene: UIScene) { + // Called as the scene transitions from the background to the foreground. + // Use this method to undo the changes made on entering the background. + } + + func sceneDidEnterBackground(_ scene: UIScene) { + // Called as the scene transitions from the foreground to the background. + // Use this method to save data, release shared resources, and store enough scene-specific state information + // to restore the scene back to its current state. + } + + +} + diff --git a/tests/integration/ios/DylibExample/DylibExample/ViewController.swift b/tests/integration/ios/DylibExample/DylibExample/ViewController.swift new file mode 100644 index 0000000000..b1da0225b7 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/ViewController.swift @@ -0,0 +1,18 @@ +// +// ViewController.swift +// DylibExample +// +// Created by Nathan Horrigan on 15/08/2021. +// + +import UIKit + +class ViewController: UIViewController { + @IBOutlet weak var label: UILabel! + + override func viewDidLoad() { + super.viewDidLoad() + let sum = calculate_sum(1, 3) + label.text = "The sum of 1 + 3 = \(sum)" + } +} diff --git a/tests/integration/ios/DylibExample/DylibExample/calc.cpp b/tests/integration/ios/DylibExample/DylibExample/calc.cpp new file mode 100644 index 0000000000..55a67016b7 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/calc.cpp @@ -0,0 +1,123 @@ +// +// WASM.cpp +// DylibExample +// +// Created by Nathan Horrigan on 17/08/2021. +// + +#include "calc.h" + +#include <CoreFoundation/CoreFoundation.h> +#include <limits.h> +#include <stdint.h> +#include <stdio.h> +#include <fstream> +#include <iostream> +#include <iterator> +#include <string> +#include <vector> + +std::string get_resources_dir() +{ + + CFURLRef resourceURL = CFBundleCopyResourcesDirectoryURL(CFBundleGetMainBundle()); + char resourcePath[PATH_MAX]; + if (CFURLGetFileSystemRepresentation(resourceURL, true, + (UInt8 *)resourcePath, + PATH_MAX)) + { + if (resourceURL != NULL) + { + CFRelease(resourceURL); + } + + return resourcePath; + } + + return ""; +} + +// Reads a whole file from disk into a byte buffer. +inline std::vector<uint8_t> read_vector_from_disk(std::string file_path) +{ + std::ifstream instream(file_path, std::ios::in | std::ios::binary); + std::vector<uint8_t> data((std::istreambuf_iterator<char>(instream)), std::istreambuf_iterator<char>()); + return data; +} + +int calculate_sum(int a, int b) +{ + printf("Creating the store...\n"); + wasm_engine_t *engine = wasm_engine_new(); + wasm_store_t *store = wasm_store_new(engine); + + printf("Loading .dylib file...\n"); + std::string wasm_path = get_resources_dir() + "/sum.dylib"; + std::vector<uint8_t> dylib = read_vector_from_disk(wasm_path.c_str()); + uint8_t *wasm_bytes = dylib.data(); + + wasm_byte_vec_t imported_bytes; + imported_bytes.size = dylib.size(); + imported_bytes.data = (wasm_byte_t *)wasm_bytes; + + printf("Deserializing module...\n"); + wasm_module_t *module; + module = wasm_module_deserialize(store, &imported_bytes); + + if (!module) + { + printf("> Error deserializing module!\n"); + + return 1; + } + + printf("Creating imports...\n"); + wasm_extern_vec_t import_object = WASM_EMPTY_VEC; + + printf("Instantiating module...\n"); + wasm_instance_t *instance = wasm_instance_new(store, module, &import_object, NULL); + + if (!instance) + { + printf("> Error instantiating module!\n"); + + return 1; + } + + printf("Retrieving exports...\n"); + wasm_extern_vec_t exports;
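+ // wasm_instance_exports fills `exports` with the instance's externs in the order the + // module declares them; sum.wasm exports only `sum`, so index 0 below is assumed to be that function.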
+ wasm_instance_exports(instance, &exports); + + if (exports.size == 0) + { + printf("> Error accessing exports!\n"); + + return 1; + } + + printf("Retrieving the `sum` function...\n"); + wasm_func_t *sum_func = wasm_extern_as_func(exports.data[0]); + + if (sum_func == NULL) + { + printf("> Failed to get the `sum` function!\n"); + + return 1; + } + + printf("Calling `sum` function...\n"); + wasm_val_t args_val[2] = {WASM_I32_VAL(a), WASM_I32_VAL(b)}; + wasm_val_t results_val[1] = {WASM_INIT_VAL}; + wasm_val_vec_t args = WASM_ARRAY_VEC(args_val); + wasm_val_vec_t results = WASM_ARRAY_VEC(results_val); + + if (wasm_func_call(sum_func, &args, &results)) + { + printf("> Error calling the `sum` function!\n"); + + return 1; + } + + printf("Results of `sum`: %d\n", results_val[0].of.i32); + + return results_val[0].of.i32; +} diff --git a/tests/integration/ios/DylibExample/DylibExample/calc.h b/tests/integration/ios/DylibExample/DylibExample/calc.h new file mode 100644 index 0000000000..e1c71165f3 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/calc.h @@ -0,0 +1,24 @@ +// +// WASM.hpp +// DylibExample +// +// Created by Nathan Horrigan on 17/08/2021. +// + +#ifndef calc_h +#define calc_h +#include "wasm.h" +#include <stdio.h> + +#ifdef __cplusplus +extern "C" +{ +#endif + + int calculate_sum(int a, int b); + +#ifdef __cplusplus +} +#endif + +#endif /* calc_h */ diff --git a/tests/integration/ios/DylibExample/DylibExample/sum.wasm b/tests/integration/ios/DylibExample/DylibExample/sum.wasm new file mode 100644 index 0000000000..f1b05efb9d --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExample/sum.wasm @@ -0,0 +1,7 @@ +(module +(type $sum_t (func (param i32 i32) (result i32))) +(func $sum_f (type $sum_t) (param $x i32) (param $y i32) (result i32) +local.get $x +local.get $y +i32.add) +(export "sum" (func $sum_f))) diff --git a/tests/integration/ios/DylibExample/DylibExampleTests/DylibExampleTests.swift b/tests/integration/ios/DylibExample/DylibExampleTests/DylibExampleTests.swift new file mode 100644 index 0000000000..7ee1e2ab53 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExampleTests/DylibExampleTests.swift @@ -0,0 +1,33 @@ +// +// DylibExampleTests.swift +// DylibExampleTests +// +// Created by Nathan Horrigan on 15/08/2021. +// + +import XCTest +@testable import DylibExample + +class DylibExampleTests: XCTestCase { + + override func setUpWithError() throws { + // Put setup code here. This method is called before the invocation of each test method in the class. + } + + override func tearDownWithError() throws { + // Put teardown code here. This method is called after the invocation of each test method in the class. + } + + func testExample() throws { + let sum = calculate_sum(5, 2) + XCTAssertEqual(sum, 7, "calculate_sum(5, 2) should return 7") + } + + func testPerformanceExample() throws { + // This is an example of a performance test case. + self.measure { + // Put the code you want to measure the time of here.
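+ // For example, a call such as `_ = calculate_sum(5, 2)` here would measure the + // full deserialize/instantiate/call round-trip (illustrative suggestion only).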
+ } + } + +} diff --git a/tests/integration/ios/DylibExample/DylibExampleTests/Info.plist b/tests/integration/ios/DylibExample/DylibExampleTests/Info.plist new file mode 100644 index 0000000000..64d65ca495 --- /dev/null +++ b/tests/integration/ios/DylibExample/DylibExampleTests/Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + $(DEVELOPMENT_LANGUAGE) + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + $(PRODUCT_BUNDLE_PACKAGE_TYPE) + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/tests/integration/ios/tests/dylib.rs b/tests/integration/ios/tests/dylib.rs new file mode 100644 index 0000000000..5c042d4443 --- /dev/null +++ b/tests/integration/ios/tests/dylib.rs @@ -0,0 +1,55 @@ +#[cfg(test)] +#[cfg(target_os = "macos")] +mod tests { + use std::process::{Command, Output, Stdio}; + + #[test] + fn test_runtime() { + // Remove anything left over from previous test runs + remove_existing_artifacts(); + + // Test the 'DylibExample' scheme + let success = run_ios_test("DylibExample/DylibExample.xcodeproj", "DylibExample"); + if !success { + panic!("Dylib iOS Tests failed with the above output!"); + } + } + + fn run_ios_test(dir: &str, scheme: &str) -> bool { + let command = Command::new("xcodebuild") + .arg("test") + .arg("-project") + .arg(dir) + .arg("-scheme") + .arg(scheme) + .arg("-destination") + .arg("platform=iOS Simulator,name=iPhone 12 Pro") + .arg("CODE_SIGNING_ALLOWED=NO") + .stdout(Stdio::inherit()) + // stderr must be piped (not inherited) so `command.stderr` below actually captures it + .stderr(Stdio::piped()) + .output() + .expect("Could not run iOS Test"); + + // Get output from the xcodebuild CLI and echo it so failures stay visible: + let stderr = String::from_utf8(command.stderr).unwrap(); + eprint!("{}", stderr); + + /* + An iOS test result is quite odd: we scan stderr for the phrase 'TEST FAILED', + which xcodebuild prints alongside the failure reason. + We also check that the command itself executed successfully.
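+ (Scanning the log is a belt-and-braces check on top of the exit status.)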
+ */ + let command_success = command.status.success(); + let test_success = !stderr.contains("** TEST FAILED **"); + + command_success && test_success + } + + fn remove_existing_artifacts() -> Output { + Command::new("rm") + .arg("-f") + .arg("DylibExample/DylibExample/sum.dylib") + .output() + .expect("Could not clear artifacts") + } +} diff --git a/tests/lib/compiler-test-derive/src/ignores.rs b/tests/lib/compiler-test-derive/src/ignores.rs index e710b59dc1..e33ca4febd 100644 --- a/tests/lib/compiler-test-derive/src/ignores.rs +++ b/tests/lib/compiler-test-derive/src/ignores.rs @@ -119,7 +119,7 @@ impl Ignores { engine = Some(alias.to_string()); } // Compilers - "singlepass" => { + "cranelift" | "llvm" | "singlepass" => { compiler = Some(alias.to_string()); } other => { diff --git a/tests/lib/compiler-test-derive/src/lib.rs b/tests/lib/compiler-test-derive/src/lib.rs index 4447e9ff59..75be483c4e 100644 --- a/tests/lib/compiler-test-derive/src/lib.rs +++ b/tests/lib/compiler-test-derive/src/lib.rs @@ -127,6 +127,8 @@ pub fn compiler_test(attrs: TokenStream, input: TokenStream) -> TokenStream { }; let singlepass_compiler_test = construct_compiler_test(&my_fn, "Singlepass"); + let cranelift_compiler_test = construct_compiler_test(&my_fn, "Cranelift"); + let llvm_compiler_test = construct_compiler_test(&my_fn, "LLVM"); // We remove the method decorators my_fn.attrs = vec![]; @@ -140,6 +142,8 @@ pub fn compiler_test(attrs: TokenStream, input: TokenStream) -> TokenStream { #my_fn #singlepass_compiler_test + #cranelift_compiler_test + #llvm_compiler_test } }; x.into() diff --git a/tests/lib/compiler-test-derive/src/tests.rs b/tests/lib/compiler-test-derive/src/tests.rs index 89acdc6b06..1833479762 100644 --- a/tests/lib/compiler-test-derive/src/tests.rs +++ b/tests/lib/compiler-test-derive/src/tests.rs @@ -73,6 +73,52 @@ gen_tests!
{ )) } } + + #[cfg(feature = "cranelift")] + mod cranelift { + use super::*; + #[test_log::test] + #[cold] + #[cfg(feature = "universal")] + fn universal() { + foo(crate::Config::new( + crate::Engine::Universal, + crate::Compiler::Cranelift + )) + } + #[test_log::test] + #[cold] + #[cfg(feature = "dylib")] + fn dylib() { + foo(crate::Config::new( + crate::Engine::Dylib, + crate::Compiler::Cranelift + )) + } + } + + #[cfg(feature = "llvm")] + mod llvm { + use super::*; + #[test_log::test] + #[cold] + #[cfg(feature = "universal")] + fn universal() { + foo(crate::Config::new( + crate::Engine::Universal, + crate::Compiler::LLVM + )) + } + #[test_log::test] + #[cold] + #[cfg(feature = "dylib")] + fn dylib() { + foo(crate::Config::new( + crate::Engine::Dylib, + crate::Compiler::LLVM + )) + } + } } }; } diff --git a/tests/lib/wast/Cargo.toml b/tests/lib/wast/Cargo.toml index 125af267fc..26fc2bad0e 100644 --- a/tests/lib/wast/Cargo.toml +++ b/tests/lib/wast/Cargo.toml @@ -12,7 +12,7 @@ edition = "2018" [dependencies] anyhow = "1.0" -wasmer = { path = "../../../lib/api", version = "=2.4.0", package = "wasmer-near", default-features = false } +wasmer = { path = "../../../lib/api", version = "=2.4.0", package = "wasmer-near", default-features = false, features = ["experimental-reference-types-extern-ref"] } wast = "38.0" tempfile = "3" thiserror = "1.0" diff --git a/tests/lib/wast/src/wast.rs b/tests/lib/wast/src/wast.rs index b71817dff0..94ad929310 100644 --- a/tests/lib/wast/src/wast.rs +++ b/tests/lib/wast/src/wast.rs @@ -399,7 +399,6 @@ impl Wast { args: &[Val], ) -> Result> { let instance = self.get_instance(instance_name.as_deref())?; - instance.handle().instance().as_ref().reset_stack_meter(); let func: Function = instance .lookup_function(field) .expect("should find the function"); diff --git a/tests/wast/wasmer/max_locals.wast b/tests/wast/wasmer/max_locals.wast deleted file mode 100644 index 6fe0355d99..0000000000 --- a/tests/wast/wasmer/max_locals.wast +++ /dev/null @@ -1,37 +0,0 @@ -;; As per https://www.w3.org/TR/wasm-core-1/#functions%E2%91%A0 functions may not specify more than -;; u32::MAX locals. Make sure this holds and does not break. - - -;; This is a validation failure -(assert_invalid - (module binary - "\00asm\01\00\00\00" ;; the header - "\01\04\01" ;; 4 byte type section with 1 element - "\60\00\00" ;; fn() -> () - "\03\02\01" ;; 2 byte func section with 1 element - "\00" ;; signature 0 - "\0a\0a\01" ;; 11 byte code section with 1 element - "\08" ;; 4 bytes for this function - "\01\ff\ff\ff\ff\0f\7f" ;; 1 local block containing 0xffff_ffff locals of type i32 - "\0b" ;; end - ) - "locals exceed maximum" -) - -;; Ensure that we don't hit any panics with > 0xFFFF_FFFF locals. -(assert_invalid - (module binary - "\00asm\01\00\00\00" ;; the header - "\01\04\01" ;; 4 byte type section with 1 element - "\60\00\00" ;; fn() -> () - "\03\02\01" ;; 2 byte func section with 1 element - "\00" ;; signature 0 - "\0a\0c\01" ;; 11 byte code section with 1 element - "\0a" ;; 11 bytes for this function - "\02" ;; 2 local blocks - "\ff\ff\ff\ff\0f\7f" ;; local block containing 0xffff_ffff locals of type i32 - "\7f\7f" ;; local block containing 0x7f locals of type i32 - "\0b" ;; end - ) - "locals exceed maximum" -)