diff --git a/src/picom.c b/src/picom.c
index 55c65d2ce2..1fd66b3bae 100644
--- a/src/picom.c
+++ b/src/picom.c
@@ -1989,6 +1989,17 @@ static bool load_shader_source_for_condition(const c2_lptr_t *cond, void *data)
 	return load_shader_source(data, c2_list_get_data(cond));
 }
 
+#ifndef NDEBUG
+#define probe __attribute__((noinline)) void
+probe xcb_connection_probe(xcb_connection_t *conn) {
+	__asm__ volatile("" : : "r"(conn));
+}
+#undef probe
+#else
+static inline void xcb_connection_probe(xcb_connection_t *conn attr_unused) {
+}
+#endif
+
 /**
  * Initialize a session.
  *
@@ -2088,6 +2099,7 @@ static session_t *session_init(int argc, char **argv, Display *dpy,
 
 	// Use the same Display across reset, primarily for resource leak checking
 	x_connection_init(&ps->c, dpy);
+	xcb_connection_probe(ps->c.c);
 	// We store width/height from screen_info instead using them directly because they
 	// can change, see configure_root().
 	ps->root_width = ps->c.screen_info->width_in_pixels;
diff --git a/tracer/.gitignore b/tracer/.gitignore
new file mode 100644
index 0000000000..ea8c4bf7f3
--- /dev/null
+++ b/tracer/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/tracer/Cargo.lock b/tracer/Cargo.lock
new file mode 100644
index 0000000000..f1d3398e25
--- /dev/null
+++ b/tracer/Cargo.lock
@@ -0,0 +1,737 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
+
+[[package]]
+name = "anyhow"
+version = "1.0.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "2.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf"
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "camino"
+version = "1.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "cargo-platform"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "cargo_metadata"
+version = "0.15.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a"
+dependencies = [
+ "camino",
+ "cargo-platform",
+ "semver",
+
"serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_derive" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "flate2" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c62115964e08cb8039170eb33c1d0e2388a256930279edca206fff675f82c3" + +[[package]] +name = "indexmap" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libbpf-cargo" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a58dfb8194a9b8ff960890a8d71e905b4635aa89c3dad4ce6c1a99c2eec759f" +dependencies = [ + "anyhow", + "cargo_metadata", + "clap", + "libbpf-rs", + "libbpf-sys", + "memmap2", + "num_enum", + "regex", + "scroll", + "scroll_derive", + "semver", + "serde", + "serde_json", + "tempfile", +] + +[[package]] +name = "libbpf-rs" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98e789d5b4dab748a2b8415974bf5b0fc2e9e782d0b118166222195acb4ac3b6" +dependencies = [ + "bitflags", + "lazy_static", + "libbpf-sys", + "libc", + "nix", + "num_enum", + "strum_macros", + "thiserror", + "vsprintf", +] + +[[package]] +name = "libbpf-sys" +version = "1.3.0+v1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6e68987fe8f2dd7b76930f800a4e5b958c766171fc3a7c33dd67c06a0f1e801" +dependencies = [ + "cc", + "nix", + "num_cpus", + "pkg-config", +] + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + +[[package]] +name = "memchr" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "memmap2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" +dependencies = [ + "libc", +] + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +dependencies = [ + "adler", +] + +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags", + "cfg-if", + "libc", + "memoffset", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "flate2", + "memchr", + "ruzstd", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "picom-tracer" +version = "0.1.0" +dependencies = [ + "libbpf-cargo", + "libbpf-rs", + "object", +] + +[[package]] +name = "pkg-config" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.78" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "rustix" +version = "0.38.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "ruzstd" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" +dependencies = [ + "byteorder", + "derive_more", + "twox-hash", +] + +[[package]] +name = "ryu" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" + +[[package]] +name = "scroll" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" + +[[package]] +name = "scroll_derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "semver" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.196" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.196" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "serde_json" +version = "1.0.113" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +dependencies = [ + "cfg-if", + "fastrand", + "rustix", + "windows-sys", +] + +[[package]] +name = "thiserror" +version = "1.0.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" + +[[package]] 
+name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "static_assertions", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "vsprintf" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aec2f81b75ca063294776b4f7e8da71d1d5ae81c2b1b149c8d89969230265d63" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "winnow" +version = "0.5.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5389a154b01683d28c77f8f68f49dea75f0a4da32557a58f68ee51ebba472d29" +dependencies = [ + "memchr", +] diff --git a/tracer/Cargo.toml b/tracer/Cargo.toml new file mode 100644 index 0000000000..3957259c7d --- /dev/null +++ b/tracer/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "picom-tracer" +version = "0.1.0" +edition = "2021" + +# See more keys and their 
definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +libbpf-rs = { version = "0.22.1", default-features = false, features = ["novendor"] } +object = "0.32.2" + +[build-dependencies] +libbpf-cargo = { version = "0.22.1", default-features = false, features = ["novendor"] } diff --git a/tracer/build.rs b/tracer/build.rs new file mode 100644 index 0000000000..ab910bc7dd --- /dev/null +++ b/tracer/build.rs @@ -0,0 +1,24 @@ +use std::env; +use std::env::consts::ARCH; +use std::ffi::OsStr; +use std::path::Path; +use std::path::PathBuf; + +use libbpf_cargo::SkeletonBuilder; + +const SRC: &str = concat!(env!("CARGO_MANIFEST_DIR"),"/src/bpf/uprobe.bpf.c"); + +fn main() { + let mut out = + PathBuf::from(env::var_os("OUT_DIR").expect("OUT_DIR must be set in build script")); + out.push("uprobe.skel.rs"); + SkeletonBuilder::new() + .source(SRC) + .clang_args( + format!("-I{}", Path::new("../vmlinux").join(ARCH).display()) + ) + .build_and_generate(&out) + .unwrap(); + println!("cargo:rerun-if-changed={SRC}"); + println!("cargo:rustc-link-lib=static=elf"); +} diff --git a/tracer/flake.lock b/tracer/flake.lock new file mode 100644 index 0000000000..02415a3170 --- /dev/null +++ b/tracer/flake.lock @@ -0,0 +1,77 @@ +{ + "nodes": { + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1706941198, + "narHash": "sha256-t6/qloMYdknVJ9a3QzjylQIZnQfgefJ5kMim50B7dwA=", + "owner": "nix-community", + "repo": "fenix", + "rev": "28dbd8b43ea328ee708f7da538c63e03d5ed93c8", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1707546158, + "narHash": "sha256-nYYJTpzfPMDxI8mzhQsYjIUX+grorqjKEU9Np6Xwy/0=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "d934204a0f8d9198e1e4515dd6fec76a139c87f0", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "root": { + "inputs": { + "fenix": "fenix", + "nixpkgs": "nixpkgs", + "rust-manifest": "rust-manifest" + } + }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1706875368, + "narHash": "sha256-KOBXxNurIU2lEmO6lR2A5El32X9x8ITt25McxKZ/Ew0=", + "owner": "rust-lang", + "repo": "rust-analyzer", + "rev": "8f6a72871ec87ed53cfe43a09fb284168a284e7e", + "type": "github" + }, + "original": { + "owner": "rust-lang", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + }, + "rust-manifest": { + "flake": false, + "locked": { + "narHash": "sha256-dJzHy2rwdhnnXQ+kZnygagv7i2sesiOPSsuF+/U/PZY=", + "type": "file", + "url": "https://static.rust-lang.org/dist/2024-02-12/channel-rust-nightly.toml" + }, + "original": { + "type": "file", + "url": "https://static.rust-lang.org/dist/2024-02-12/channel-rust-nightly.toml" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/tracer/flake.nix b/tracer/flake.nix new file mode 100644 index 0000000000..0d51c443f1 --- /dev/null +++ b/tracer/flake.nix @@ -0,0 +1,51 @@ +{ + description = "picom tracer"; + inputs.fenix = { + inputs.nixpkgs.follows = "nixpkgs"; + url = github:nix-community/fenix; + }; + inputs.rust-manifest = { + flake = false; + url = "https://static.rust-lang.org/dist/2024-02-12/channel-rust-nightly.toml"; + }; + + outputs = { self, nixpkgs, fenix, ... 
} @ inputs: let + system = "x86_64-linux"; + pkgs = import nixpkgs { inherit system; overlays = [ fenix.overlays.default ]; }; + rust-toolchain = (pkgs.fenix.fromManifestFile inputs.rust-manifest).withComponents [ + "rustfmt" + "rust-src" + "clippy" + "rustc" + "cargo" + ]; + makeLinuxHeaders = kernel: pkgs.stdenv.mkDerivation { + name = "linux-source-${kernel.version}"; + phases = [ "installPhase" ]; + installPhase = '' + mkdir -p $out/include + kpath=${kernel.dev}/lib/modules/${kernel.version} + cp -rv $kpath/source/include/ $out + chmod +w -R $out + cp -rv $kpath/build/include/* $out/include + chmod +w -R $out + cp -rv $kpath/source/arch/x86/include/* $out/include + chmod +w -R $out + cp -rv $kpath/build/arch/x86/include/generated/* $out/include + ''; + }; + linuxHeaders = makeLinuxHeaders pkgs.linuxPackages_latest.kernel; + in { + packages.${system}.rust-toolchain = rust-toolchain; + devShells.${system}.default = pkgs.stdenv.mkDerivation (with pkgs; { + name = "picom-tracer"; + nativeBuildInputs = [ rust-toolchain bpftool cargo-generate bcc python311 linuxHeaders pkg-config libbpf clang ]; + buildInputs = [ elfutils zlib ]; + shellHook = '' + export PYTHONPATH="$PYTHONPATH:${bcc}/lib/python3.11/site-packages/bcc-${bcc.version}-py3.11.egg" + export BCC_KERNEL_SOURCE="${linuxHeaders}" + export RUSTFLAGS="-L native=${libbpf}/lib -l elf -l z" + ''; + }); + }; +} diff --git a/tracer/src/bpf/uprobe.bpf.c b/tracer/src/bpf/uprobe.bpf.c new file mode 100644 index 0000000000..ff3db02c40 --- /dev/null +++ b/tracer/src/bpf/uprobe.bpf.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2020 Facebook */ +// clang-format off +#include "vmlinux.h" +#include +#include +// clang-format on + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; + +struct xcb_connection_t { + /* This must be the first field; see _xcb_conn_ret_error(). 
*/ + int has_error; + + /* constant data */ + void *setup; + int fd; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u64); + __uint(max_entries, 256); +} my_map SEC(".maps"); + +struct event { + u8 task[TASK_COMM_LEN]; + __u64 delta_us; + pid_t pid; +}; + +struct event _event = {0}; + +SEC("uprobe") +int BPF_KPROBE(uprobe_epoll_wait) { + u64 *curr_pid = bpf_map_lookup_elem(&my_map, (u32[]){0}); + u32 pid = bpf_get_current_pid_tgid() >> 32; + if (!curr_pid || pid != *curr_pid) { + return 0; + } + void **conn_ptr = bpf_map_lookup_elem(&my_map, (u32[]){1}); + if (!conn_ptr) { + return 0; + } + struct xcb_connection_t conn; + if (bpf_probe_read_user(&conn, sizeof(conn), *conn_ptr)) { + bpf_printk("cannot read"); + return 0; + } + + u32 queue_len; + if (bpf_probe_read_user(&queue_len, sizeof(queue_len), (*conn_ptr) + 4212)) { + bpf_printk("cannot read"); + } + + if (queue_len != 0) + bpf_printk("epoll_wait %d %u", conn.fd, queue_len); + return 0; +} + +SEC("uprobe") +int BPF_KPROBE(uprobe_xcb_conn, void *ptr) { + struct xcb_connection_t conn; + u64 pid = bpf_get_current_pid_tgid() >> 32; + bpf_map_update_elem(&my_map, (u32[]){0}, &pid, 0); + bpf_printk("xcb connection is %p", ptr); + if (bpf_probe_read_user(&conn, sizeof(conn), ptr)) { + bpf_printk("cannot read"); + } else { + bpf_map_update_elem(&my_map, (u32[]){1}, &ptr, 0); + bpf_printk("fd is %d", conn.fd); + } + return 0; +} diff --git a/tracer/src/bpf/vmlinux.h b/tracer/src/bpf/vmlinux.h new file mode 100644 index 0000000000..a78a51e011 --- /dev/null +++ b/tracer/src/bpf/vmlinux.h @@ -0,0 +1,96533 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push(__attribute__((preserve_access_index)), apply_to = record) +#endif + +struct pt_regs { + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long bp; + unsigned long bx; + unsigned long r11; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long orig_ax; + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; +}; + +typedef unsigned short __u16; + +typedef __u16 u16; + +typedef unsigned long long __u64; + +typedef __u64 u64; + +struct saved_msr; + +struct saved_msrs { + unsigned int num; + struct saved_msr *array; +}; + +struct desc_ptr { + unsigned short size; + unsigned long address; +} __attribute__((packed)); + +typedef _Bool bool; + +struct saved_context { + struct pt_regs regs; + u16 ds; + u16 es; + u16 fs; + u16 gs; + unsigned long kernelmode_gs_base; + unsigned long usermode_gs_base; + unsigned long fs_base; + unsigned long cr0; + unsigned long cr2; + unsigned long cr3; + unsigned long cr4; + u64 misc_enable; + struct saved_msrs saved_msrs; + unsigned long efer; + u16 gdt_pad; + struct desc_ptr gdt_desc; + u16 idt_pad; + struct desc_ptr idt; + u16 ldt; + u16 tss; + unsigned long tr; + unsigned long safety; + unsigned long return_address; + bool misc_enable_saved; +} __attribute__((packed)); + +typedef unsigned int __u32; + +typedef __u32 u32; + +struct msr { + union { + struct { + u32 l; + u32 h; + }; + u64 q; + }; +}; + +struct msr_info { + u32 msr_no; + struct msr reg; + struct msr *msrs; + int err; +}; + +struct saved_msr { + bool valid; + struct msr_info info; +}; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, unsigned long, void *); + 
+struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block __attribute__((btf_type_tag("rcu"))) * next; + int priority; +}; + +struct dmi_strmatch { + unsigned char slot : 7; + unsigned char exact_match : 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +typedef unsigned long kernel_ulong_t; + +struct x86_cpu_id { + __u16 vendor; + __u16 family; + __u16 model; + __u16 steppings; + __u16 feature; + kernel_ulong_t driver_data; +}; + +enum { + false = 0, + true = 1, +}; + +enum { + DESC_TSS = 9, + DESC_LDT = 2, + DESCTYPE_S = 16, +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, + FAULT_FLAG_UNSHARE = 1024, + FAULT_FLAG_ORIG_PTE_VALID = 2048, + FAULT_FLAG_VMA_LOCK = 4096, +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT = 2, + MEMORY_DEVICE_FS_DAX = 3, + MEMORY_DEVICE_GENERIC = 4, + MEMORY_DEVICE_PCI_P2PDMA = 5, +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +enum class_map_type { + DD_CLASS_TYPE_DISJOINT_BITS = 0, + DD_CLASS_TYPE_LEVEL_NUM = 1, + DD_CLASS_TYPE_DISJOINT_NAMES = 2, + DD_CLASS_TYPE_LEVEL_NAMES = 3, +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +enum rpm_status { + RPM_INVALID = -1, + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +enum device_physical_location_panel { + DEVICE_PANEL_TOP = 0, + DEVICE_PANEL_BOTTOM = 1, + DEVICE_PANEL_LEFT = 2, + DEVICE_PANEL_RIGHT = 3, + DEVICE_PANEL_FRONT = 4, + DEVICE_PANEL_BACK = 5, + DEVICE_PANEL_UNKNOWN = 6, +}; + +enum device_physical_location_vertical_position { + DEVICE_VERT_POS_UPPER = 0, + DEVICE_VERT_POS_CENTER = 1, + DEVICE_VERT_POS_LOWER = 2, +}; + +enum device_physical_location_horizontal_position { + DEVICE_HORI_POS_LEFT = 0, + DEVICE_HORI_POS_CENTER = 1, + DEVICE_HORI_POS_RIGHT = 2, +}; + +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, + DEVICE_REMOVABLE_UNKNOWN = 1, + DEVICE_FIXED = 2, + DEVICE_REMOVABLE = 3, +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +enum freeze_holder { + FREEZE_HOLDER_KERNEL = 1, + FREEZE_HOLDER_USERSPACE = 2, +}; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +enum zone_type { + ZONE_DMA = 0, + ZONE_DMA32 = 1, + ZONE_NORMAL = 2, + ZONE_MOVABLE = 3, + ZONE_DEVICE = 4, + __MAX_NR_ZONES = 5, +}; + 
+typedef unsigned short umode_t; + +typedef unsigned long __kernel_ulong_t; + +typedef __kernel_ulong_t __kernel_size_t; + +typedef __kernel_size_t size_t; + +struct ctl_table; + +typedef long long __kernel_loff_t; + +typedef __kernel_loff_t loff_t; + +typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + enum { + SYSCTL_TABLE_TYPE_DEFAULT = 0, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, + } type; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +typedef struct { + int counter; +} atomic_t; + +typedef unsigned char __u8; + +typedef __u8 u8; + +struct qspinlock { + union { + atomic_t val; + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; + }; +}; + +typedef struct qspinlock arch_spinlock_t; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, + MIGRATE_SYNC_NO_COPY = 3, +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +enum vtime_state { + VTIME_INACTIVE = 0, + VTIME_IDLE = 1, + VTIME_SYS = 2, + VTIME_USER = 3, + VTIME_GUEST = 4, +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +enum ftrace_ops_cmd { + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF = 0, + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER = 1, + FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER = 2, +}; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RANDOM_START = 0, + KMALLOC_RANDOM_END = 15, + KMALLOC_RECLAIM = 16, + KMALLOC_DMA = 17, + KMALLOC_CGROUP = 18, + NR_KMALLOC_TYPES = 19, +}; + +struct desc_struct { + u16 limit0; + u16 base0; + u16 base1 : 8; + u16 type : 4; + u16 s : 1; + u16 dpl : 2; + u16 p : 1; + u16 limit1 : 4; + u16 avl : 1; + u16 l : 1; + u16 d : 1; + u16 g : 1; + u16 base2 : 8; +}; + +struct gdt_page { + struct desc_struct gdt[16]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 
64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + 
long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct cpuinfo_topology { + u32 apicid; + u32 initial_apicid; + u32 pkg_id; + u32 die_id; + u32 cu_id; + u32 core_id; + u32 logical_pkg_id; + u32 logical_die_id; + u32 llc_id; + u32 l2c_id; +}; + +struct cpuinfo_x86 { + __u8 x86; + __u8 x86_vendor; + __u8 x86_model; + __u8 x86_stepping; + int x86_tlbsize; + __u8 x86_virt_bits; + __u8 x86_phys_bits; + __u8 x86_coreid_bits; + __u32 extended_cpuid_level; + int cpuid_level; + union { + __u32 x86_capability[23]; + unsigned long x86_capability_alignment; + }; + char x86_vendor_id[16]; + char x86_model_id[64]; + struct cpuinfo_topology topo; + unsigned int x86_cache_size; + int x86_cache_alignment; + int x86_cache_max_rmid; + int x86_cache_occ_scale; + int x86_cache_mbm_width_offset; + int x86_power; + unsigned long loops_per_jiffy; + u64 ppin; + u16 x86_max_cores; + u16 x86_clflush_size; + u16 booted_cores; + u16 cpu_index; + bool smt_active; + u32 microcode; + u8 x86_cache_bits; + unsigned int initialized : 1; +}; + +struct thread_info { + unsigned long flags; + unsigned long syscall_work; + u32 status; + u32 cpu; +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; + u16 src; + u16 dst; +}; + +typedef long long __s64; + 
+typedef __s64 s64; + +struct sched_statistics { + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + u64 block_start; + u64 block_max; + s64 sum_block_runtime; + u64 exec_max; + u64 slice_max; + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct cpumask { + unsigned long bits[1]; +}; + +typedef struct cpumask cpumask_t; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + unsigned long pcount; + unsigned long long run_delay; + unsigned long long last_arrival; + unsigned long long last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +typedef int __kernel_clockid_t; + +typedef __kernel_clockid_t clockid_t; + +struct __kernel_timespec; + +struct old_timespec32; + +struct pollfd; + +struct restart_block { + unsigned long arch_data; + long (*fn)(struct restart_block *); + union { + struct { + u32 __attribute__((btf_type_tag("user"))) * uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __attribute__((btf_type_tag("user"))) * uaddr2; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec + __attribute__((btf_type_tag("user"))) * + rmtp; + struct old_timespec32 + __attribute__((btf_type_tag("user"))) * + compat_rmtp; + }; + u64 expires; + } nanosleep; + struct { + struct pollfd __attribute__((btf_type_tag("user"))) * ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +typedef int __kernel_pid_t; + +typedef __kernel_pid_t pid_t; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; +}; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +struct vtime { + seqcount_t seqcount; + unsigned long long starttime; + enum vtime_state state; + unsigned int cpu; + u64 utime; + u64 stime; + u64 gtime; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +typedef struct { + s64 counter; +} atomic64_t; + +typedef atomic64_t atomic_long_t; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct 
list_head wait_list; +}; + +struct posix_cputimers_work { + struct callback_head work; + struct mutex mutex; + unsigned int scheduled; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + unsigned long sig[1]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +typedef unsigned int __kernel_uid32_t; + +typedef __kernel_uid32_t uid_t; + +typedef struct { + uid_t val; +} kuid_t; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch { + char __attribute__((btf_type_tag("user"))) * selector; + unsigned long offset; + unsigned long len; + bool on_dispatch; +}; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + unsigned long bits[1]; +} nodemask_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct arch_tlbflush_unmap_batch { + struct cpumask cpumask; +}; + +struct tlbflush_unmap_batch { + struct arch_tlbflush_unmap_batch arch; + bool flush_required; + bool writable; +}; + +struct page; + +struct page_frag { + struct page *page; + __u32 offset; + __u32 size; +}; + +struct latency_record { + unsigned long backtrace[12]; + unsigned int count; + unsigned long time; + unsigned long max; +}; + +typedef unsigned int gfp_t; + +struct kmap_ctrl {}; + +struct timer_list { + struct hlist_node entry; + unsigned long expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct llist_head { + struct llist_node *first; +}; + +struct thread_shstk { + u64 base; + u64 size; +}; + +struct fpu_state_perm { + u64 __state_perm; + unsigned int __state_size; + unsigned int __user_state_size; +}; + +struct fregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; + u32 status; +}; + +struct fxregs_state { + u16 cwd; + u16 swd; + u16 twd; + u16 fop; + union { + struct { + u64 rip; + u64 rdp; + }; + struct { + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + }; + }; + u32 mxcsr; + u32 mxcsr_mask; + u32 st_space[32]; + u32 xmm_space[64]; + u32 padding[12]; + union { + u32 padding1[12]; + u32 sw_reserved[12]; + }; +}; + +struct math_emu_info; + +struct swregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; + u8 ftop; + u8 changed; + u8 lookahead; + u8 no_update; + u8 rm; + u8 alimit; + struct math_emu_info *info; + u32 entry_eip; +}; + +struct xstate_header { + u64 xfeatures; + u64 xcomp_bv; + u64 reserved[6]; +}; + +struct xregs_state { + struct fxregs_state i387; + struct xstate_header header; + u8 extended_state_area[0]; +}; + +union fpregs_state { + struct fregs_state fsave; + struct fxregs_state fxsave; + struct swregs_state soft; + struct xregs_state xsave; + u8 __padding[4096]; +}; + +struct fpstate { + unsigned int size; + unsigned int user_size; + u64 xfeatures; + u64 user_xfeatures; + u64 xfd; + unsigned int is_valloc : 1; + unsigned int is_guest : 1; + unsigned int is_confidential : 1; + unsigned int in_use : 1; + long : 64; + long : 64; + long : 64; + union fpregs_state regs; +}; + +struct fpu { + unsigned int last_cpu; + unsigned long avx512_timestamp; + struct fpstate *fpstate; + struct fpstate 
*__task_fpstate; + struct fpu_state_perm perm; + struct fpu_state_perm guest_perm; + struct fpstate __fpstate; +}; + +struct perf_event; + +struct io_bitmap; + +struct thread_struct { + struct desc_struct tls_array[3]; + unsigned long sp; + unsigned short es; + unsigned short ds; + unsigned short fsindex; + unsigned short gsindex; + unsigned long fsbase; + unsigned long gsbase; + struct perf_event *ptrace_bps[4]; + unsigned long virtual_dr6; + unsigned long ptrace_dr7; + unsigned long cr2; + unsigned long trap_nr; + unsigned long error_code; + struct io_bitmap *io_bitmap; + unsigned long iopl_emul; + unsigned int iopl_warn : 1; + unsigned int sig_on_uaccess_err : 1; + u32 pkru; + unsigned long features; + unsigned long features_locked; + struct thread_shstk shstk; + long : 64; + struct fpu fpu; +}; + +struct task_group; + +struct rcu_node; + +struct mm_struct; + +struct address_space; + +struct pid; + +struct completion; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct audit_context; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct compat_robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct mempolicy; + +struct rseq; + +struct pipe_inode_info; + +struct task_delay_info; + +struct ftrace_ret_stack; + +struct mem_cgroup; + +struct obj_cgroup; + +struct gendisk; + +struct uprobe_task; + +struct vm_struct; + +struct bpf_local_storage; + +struct bpf_run_ctx; + +struct task_struct { + struct thread_info thread_info; + unsigned int __state; + unsigned int saved_state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + struct __call_single_node wake_entry; + int on_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + u64 last_ran; + s64 time_slice; + int sq_idx; + struct list_head sq_node; + int boost_prio; + u64 sched_time; + struct task_group *sched_task_group; + long : 64; + long : 64; + long : 64; + long : 64; + struct sched_statistics stats; + struct hlist_head preempt_notifiers; + unsigned int btrace_seq; + unsigned int policy; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + unsigned short migration_disabled; + unsigned short migration_flags; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; + unsigned long rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + unsigned long jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork : 1; + unsigned int 
sched_contributes_to_load : 1; + unsigned int sched_migrated : 1; + long : 29; + unsigned int sched_remote_wakeup : 1; + unsigned int sched_rt_mutex : 1; + unsigned int in_execve : 1; + unsigned int in_iowait : 1; + unsigned int restore_sigmask : 1; + unsigned int in_user_fault : 1; + unsigned int in_lru_fault : 1; + unsigned int no_cgroup_migration : 1; + unsigned int frozen : 1; + unsigned int use_memdelay : 1; + unsigned int in_eventfd : 1; + unsigned int in_thrashing : 1; + unsigned long atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + unsigned long stack_canary; + struct task_struct __attribute__((btf_type_tag("rcu"))) * real_parent; + struct task_struct __attribute__((btf_type_tag("rcu"))) * parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_node; + struct completion *vfork_done; + int __attribute__((btf_type_tag("user"))) * set_child_tid; + int __attribute__((btf_type_tag("user"))) * clear_child_tid; + void *worker_private; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + struct vtime vtime; + atomic_t tick_dep_mask; + unsigned long nvcsw; + unsigned long nivcsw; + u64 start_time; + u64 start_boottime; + unsigned long min_flt; + unsigned long maj_flt; + struct posix_cputimers posix_cputimers; + struct posix_cputimers_work posix_cputimers_work; + const struct cred __attribute__((btf_type_tag("rcu"))) * ptracer_cred; + const struct cred __attribute__((btf_type_tag("rcu"))) * real_cred; + const struct cred __attribute__((btf_type_tag("rcu"))) * cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + unsigned long last_switch_count; + unsigned long last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct __attribute__((btf_type_tag("rcu"))) * sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct audit_context *audit_context; + kuid_t loginuid; + unsigned int sessionid; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct io_context *io_context; + struct capture_control *capture_control; + unsigned long ptrace_message; + kernel_siginfo_t *last_siginfo; + struct task_io_accounting ioac; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; + struct css_set __attribute__((btf_type_tag("rcu"))) * cgroups; + struct list_head cg_list; + struct robust_list_head __attribute__((btf_type_tag("user"))) * robust_list; + struct compat_robust_list_head __attribute__((btf_type_tag("user"))) * compat_robust_list; + struct list_head pi_state_list; + 
struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct mempolicy *mempolicy; + short il_prev; + short pref_node_fork; + struct rseq __attribute__((btf_type_tag("user"))) * rseq; + u32 rseq_len; + u32 rseq_sig; + unsigned long rseq_event_mask; + int mm_cid; + int last_mm_cid; + int migrate_from_cpu; + int mm_cid_active; + struct callback_head cid_work; + struct tlbflush_unmap_batch tlb_ubc; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + unsigned long dirty_paused_when; + int latency_record_count; + struct latency_record latency_record[32]; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + int curr_ret_stack; + int curr_ret_depth; + struct ftrace_ret_stack *ret_stack; + unsigned long long ftrace_timestamp; + atomic_t trace_overrun; + atomic_t tracing_graph_pause; + unsigned long trace_recursion; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct obj_cgroup *objcg; + struct gendisk *throttle_disk; + struct uprobe_task *utask; + struct kmap_ctrl kmap_ctrl; + struct callback_head rcu; + refcount_t rcu_users; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) * bpf_storage; + struct bpf_run_ctx *bpf_ctx; + void __attribute__((btf_type_tag("user"))) * mce_vaddr; + __u64 mce_kflags; + u64 mce_addr; + __u64 mce_ripv : 1; + __u64 mce_whole_page : 1; + __u64 __mce_reserved : 62; + struct callback_head mce_kill_me; + int mce_count; + struct llist_head kretprobe_instances; + struct llist_head rethooks; + struct callback_head l1d_flush_kill; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct thread_struct thread; +}; + +typedef struct { +} lockdep_map_p; + +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + unsigned int ma_flags; + void __attribute__((btf_type_tag("rcu"))) * ma_root; +}; + +typedef unsigned long pgdval_t; + +typedef struct { + pgdval_t pgd; +} pgd_t; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +typedef int __s32; + +typedef __s32 s32; + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; + struct list_head list; + s32 __attribute__((btf_type_tag("percpu"))) * counters; +}; + +typedef short __s16; + +typedef __s16 s16; + +struct ldt_struct; + +struct vdso_image; + +typedef struct { + u64 ctx_id; + atomic64_t tlb_gen; + struct rw_semaphore ldt_usr_sem; + struct ldt_struct *ldt; + unsigned long flags; + unsigned long lam_cr3_mask; + u64 untag_mask; + struct mutex lock; + void __attribute__((btf_type_tag("user"))) * vdso; + const struct vdso_image *vdso_image; + atomic_t perf_rdpmc_allowed; + u16 pkey_allocation_map; + s16 execute_only_pkey; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct 
file; + +struct mm_cid; + +struct linux_binfmt; + +struct kioctx_table; + +struct user_namespace; + +struct mmu_notifier_subscriptions; + +struct mm_struct { + struct { + struct { + atomic_t mm_count; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; + struct maple_tree mm_mt; + unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); + unsigned long mmap_base; + unsigned long mmap_legacy_base; + unsigned long mmap_compat_base; + unsigned long mmap_compat_legacy_base; + unsigned long task_size; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + struct mm_cid __attribute__((btf_type_tag("percpu"))) * pcpu_cid; + unsigned long mm_cid_next_scan; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + int mm_lock_seq; + unsigned long hiwater_rss; + unsigned long hiwater_vm; + unsigned long total_vm; + unsigned long locked_vm; + atomic64_t pinned_vm; + unsigned long data_vm; + unsigned long exec_vm; + unsigned long stack_vm; + unsigned long def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + unsigned long start_code; + unsigned long end_code; + unsigned long start_data; + unsigned long end_data; + unsigned long start_brk; + unsigned long brk; + unsigned long start_stack; + unsigned long arg_start; + unsigned long arg_end; + unsigned long env_start; + unsigned long env_end; + unsigned long saved_auxv[52]; + struct percpu_counter rss_stat[4]; + struct linux_binfmt *binfmt; + mm_context_t context; + unsigned long flags; + spinlock_t ioctx_lock; + struct kioctx_table __attribute__((btf_type_tag("rcu"))) * ioctx_table; + struct task_struct __attribute__((btf_type_tag("rcu"))) * owner; + struct user_namespace *user_ns; + struct file __attribute__((btf_type_tag("rcu"))) * exe_file; + struct mmu_notifier_subscriptions *notifier_subscriptions; + atomic_t tlb_flush_pending; + atomic_t tlb_flush_batched; + struct uprobes_state uprobes_state; + atomic_long_t hugetlb_usage; + struct work_struct async_put_work; + unsigned long ksm_merging_pages; + unsigned long ksm_rmap_items; + unsigned long ksm_zero_pages; + struct { + struct list_head list; + unsigned long bitmap; + struct mem_cgroup *memcg; + } lru_gen; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; + unsigned long cpu_bitmap[0]; +}; + +typedef unsigned int fmode_t; + +struct qrwlock { + union { + atomic_t cnts; + struct { + u8 wlocked; + u8 __lstate[3]; + }; + }; + arch_spinlock_t wait_lock; +}; + +typedef struct qrwlock arch_rwlock_t; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +struct fown_struct { + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +struct file_ra_state { + unsigned long start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + +struct vfsmount; + +struct dentry; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +typedef u32 errseq_t; + +struct inode; + +struct file_operations; + +struct file { + union { + struct llist_node f_llist; + struct callback_head f_rcuhead; + unsigned int f_iocb_flags; + }; + spinlock_t f_lock; + fmode_t f_mode; + atomic_long_t f_count; + struct mutex f_pos_lock; + loff_t f_pos; + unsigned int f_flags; + struct fown_struct f_owner; + const struct cred *f_cred; + struct file_ra_state f_ra; + struct path 
f_path; + struct inode *f_inode; + const struct file_operations *f_op; + u64 f_version; + void *private_data; + struct hlist_head *f_ep; + struct address_space *f_mapping; + errseq_t f_wb_err; + errseq_t f_sb_err; +}; + +struct pid_namespace; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t lock; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[0]; +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void __attribute__((btf_type_tag("rcu"))) * xa_head; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct proc_ns_operations; + +struct ns_common { + atomic_long_t stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct kmem_cache; + +struct fs_pin; + +struct ucounts; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; + int memfd_noexec_scope; +}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + u32 nr_extents; + union { + struct uid_gid_extent extent[5]; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +typedef unsigned int __kernel_gid32_t; + +typedef __kernel_gid32_t gid_t; + +typedef struct { + gid_t val; +} kgid_t; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + struct ctl_table *ctl_table; + int ctl_table_size; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct binfmt_misc; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + unsigned long flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct key *persistent_keyring_register; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + long ucount_max[12]; + long rlimit_max[4]; + struct binfmt_misc *binfmt_misc; +}; + +struct nsset; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common *(*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace *(*owner)(struct ns_common *); + struct ns_common *(*get_parent)(struct ns_common *); +}; + +typedef __s64 time64_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + unsigned long hash; + union { + struct { + u16 desc_len; + char desc[6]; 
+ }; + unsigned long x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + unsigned long nr_leaves_on_tree; +}; + +union key_payload { + void __attribute__((btf_type_tag("rcu"))) * rcu_data0; + void *data[4]; +}; + +typedef s32 int32_t; + +typedef int32_t key_serial_t; + +typedef u32 uint32_t; + +typedef uint32_t key_perm_t; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + unsigned short quotalen; + unsigned short datalen; + short state; + unsigned long flags; + union { + struct keyring_index_key index_key; + struct { + unsigned long hash; + unsigned long len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, + const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct lock_class_key {}; + +struct key_preparsed_payload; + +struct key_match_data; + +struct seq_file; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction *(*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set *(*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, + kgid_t *); + int (*permissions)(struct ctl_table_header *, struct ctl_table *); +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + atomic_t count; + atomic_long_t 
ucount[12]; + atomic_long_t rlimit[4]; +}; + +typedef struct { + u64 val; +} kernel_cap_t; + +struct user_struct; + +struct group_info; + +struct cred { + atomic_long_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; +}; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + unsigned long begin; + unsigned long flags; +}; + +struct user_struct { + refcount_t __count; + struct percpu_counter epoll_watches; + unsigned long unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + atomic_t nr_watches; + struct ratelimit_state ratelimit; +}; + +struct group_info { + refcount_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct super_block; + +struct mnt_idmap; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct mnt_idmap *mnt_idmap; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct qstr { + union { + struct { + u32 hash; + u32 len; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + +struct lockref { + union { + __u64 lock_count; + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[32]; + struct lockref d_lockref; + const struct dentry_operations *d_op; + struct super_block *d_sb; + unsigned long d_time; + void *d_fsdata; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct list_head d_child; + struct list_head d_subdirs; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; +}; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +struct timespec64 { + time64_t tv_sec; + long tv_nsec; +}; + +typedef u64 blkcnt_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + unsigned long nrpages; + unsigned long writeback_index; + const struct address_space_operations *a_ops; + unsigned long flags; + struct rw_semaphore i_mmap_rwsem; + errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; +}; + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct inode { + umode_t i_mode; + unsigned short i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + unsigned long i_ino; + 
union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + struct timespec64 __i_atime; + struct timespec64 __i_mtime; + struct timespec64 __i_ctime; + spinlock_t i_lock; + unsigned short i_bytes; + u8 i_blkbits; + u8 i_write_hint; + blkcnt_t i_blocks; + unsigned long i_state; + struct rw_semaphore i_rwsem; + unsigned long dirtied_when; + unsigned long dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_generation; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) * i_fsnotify_marks; + void *i_private; +}; + +typedef long __kernel_long_t; + +typedef __kernel_long_t __kernel_ssize_t; + +typedef __kernel_ssize_t ssize_t; + +struct delayed_call; + +struct iattr; + +struct kstat; + +struct fiemap_extent_info; + +struct fileattr; + +struct offset_ctx; + +struct inode_operations { + struct dentry *(*lookup)(struct inode *, struct dentry *, unsigned int); + const char *(*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct mnt_idmap *, struct inode *, int); + struct posix_acl *(*get_inode_acl)(struct inode *, int, bool); + int (*readlink)(struct dentry *, char __attribute__((btf_type_tag("user"))) *, int); + int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, + struct dentry *, unsigned int); + int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); + int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, + unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, + umode_t); + int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); + struct posix_acl *(*get_acl)(struct mnt_idmap *, struct dentry *, int); + int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); + int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + struct offset_ctx *(*get_offset_ctx)(struct inode *); + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 
64; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; + u32 dio_mem_align; + u32 dio_offset_align; + u64 change_cookie; +}; + +struct offset_ctx { + struct xarray xa; + u32 next_offset; +}; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct mtd_info; + +typedef long long qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + unsigned long dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct __attribute__((btf_type_tag("rcu"))) * task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int __attribute__((btf_type_tag("percpu"))) * read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + unsigned short frozen; + unsigned short freeze_holders; + struct percpu_rw_semaphore rw_sem[3]; +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; + struct xarray xa; +}; + +struct file_system_type; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct unicode_map; + +struct block_device; + +struct bdev_handle; + +struct backing_dev_info; + +struct shrinker; + +struct workqueue_struct; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + unsigned long s_blocksize; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + unsigned long s_flags; + unsigned long s_iflags; + unsigned long s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + const struct xattr_handler *const *s_xattr; + struct unicode_map *s_encoding; + __u16 s_encoding_flags; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct bdev_handle *s_bdev_handle; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers 
s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + __u32 s_fsnotify_mask; + struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) * s_fsnotify_marks; + char s_id[32]; + uuid_t s_uuid; + unsigned int s_max_links; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + struct shrinker *s_shrink; + atomic_long_t s_remove_count; + atomic_long_t s_fsnotify_connectors; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + long : 64; + long : 64; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long : 64; + long : 64; +}; + +struct fs_context; + +struct fs_parameter_spec; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry *(*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, + struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + unsigned short flags; + const void *data; +}; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + const struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized : 1; + unsigned int state_in_sysfs : 1; + unsigned int state_add_uevent_sent : 1; + unsigned int state_remove_uevent_sent : 1; + unsigned int uevent_suppress : 1; +}; + +struct module_param_attrs; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_memory { + void *base; + unsigned int size; + struct mod_tree_node mtn; +}; + +struct orc_entry; + +struct mod_arch_specific { + unsigned int num_orcs; + int *orc_unwind_ip; + struct orc_entry *orc_unwind; +}; + +struct elf64_sym; + +typedef struct elf64_sym Elf64_Sym; + +struct mod_kallsyms { + Elf64_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +typedef const int tracepoint_ptr_t; + +struct _ddebug; + +struct ddebug_class_map; + +struct _ddebug_info { + struct _ddebug *descs; + struct ddebug_class_map *classes; + unsigned int num_descs; + unsigned int num_classes; +}; + +struct 
module_attribute; + +struct kernel_symbol; + +struct kernel_param; + +struct exception_table_entry; + +struct bug_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct srcu_struct; + +struct bpf_raw_event_map; + +struct jump_entry; + +struct trace_event_call; + +struct trace_eval_map; + +struct static_call_site; + +struct module { + enum module_state state; + struct list_head list; + char name[56]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + struct module_memory mem[7]; + struct mod_arch_specific arch; + unsigned long taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms __attribute__((btf_type_tag("rcu"))) * kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void __attribute__((btf_type_tag("percpu"))) * percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + void *btf_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + unsigned int num_ftrace_callsites; + unsigned long *ftrace_callsites; + void *kprobes_text_start; + unsigned int kprobes_text_size; + unsigned long *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + int num_static_call_sites; + struct static_call_site *static_call_sites; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + struct _ddebug_info dyndbg_info; +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_uevent_env; + +struct kset_uevent_ops { + int (*const filter)(const struct kobject *); + const char *(*const name)(const struct kobject *); + int (*const uevent)(const struct kobject *, struct kobj_uevent_env *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct sysfs_ops; + +struct attribute_group; + +struct kobj_ns_type_operations; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *); + const void *(*namespace)(const struct kobject *); + void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); +}; + +struct attribute; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t 
(*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct bin_attribute; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); + struct attribute **attrs; + struct bin_attribute **bin_attrs; +}; + +struct vm_area_struct; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space *(*f_mapping)(); + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, + loff_t, size_t); + ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, + loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *, loff_t, int); + int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, + struct vm_area_struct *); +}; + +typedef u64 sector_t; + +struct writeback_control; + +struct folio; + +struct readahead_control; + +struct kiocb; + +struct iov_iter; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*read_folio)(struct file *, struct folio *); + int (*writepages)(struct address_space *, struct writeback_control *); + bool (*dirty_folio)(struct address_space *, struct folio *); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, + struct page **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, + unsigned int, struct page *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio)(struct folio *, size_t, size_t); + bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, + enum migrate_mode); + int (*launder_folio)(struct folio *); + bool (*is_partially_uptodate)(struct folio *, size_t, size_t); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); + int (*error_remove_page)(struct address_space *, struct page *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); + int (*swap_rw)(struct kiocb *, struct iov_iter *); +}; + +struct page_pool; + +struct dev_pagemap; + +struct page { + unsigned long flags; + union { + struct { + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + struct list_head buddy_list; + struct list_head pcp_list; + }; + struct address_space *mapping; + union { + unsigned long index; + unsigned long share; + }; + unsigned long private; + }; + struct { + unsigned long pp_magic; + struct page_pool *pp; + unsigned long _pp_mapping_pad; + unsigned long dma_addr; + atomic_long_t pp_frag_count; + }; + struct { + unsigned long compound_head; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + atomic_t _mapcount; + unsigned int page_type; + }; + atomic_t _refcount; + unsigned long memcg_data; +}; + +struct range { + u64 start; + u64 end; +}; + +struct vmem_altmap { + unsigned long base_pfn; + const unsigned long end_pfn; + const unsigned long reserve; + unsigned long free; + unsigned long align; + unsigned long alloc; +}; + +struct percpu_ref_data; + 
+struct percpu_ref { + unsigned long percpu_count_ptr; + struct percpu_ref_data *data; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + unsigned long vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + union { + struct range range; + struct { + struct { + } __empty_ranges; + struct range ranges[0]; + }; + }; +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic : 1; + bool allow_reinit : 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +typedef unsigned int vm_fault_t; + +struct vm_fault; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); + int (*memory_failure)(struct dev_pagemap *, unsigned long, unsigned long, int); +}; + +typedef unsigned long pteval_t; + +typedef struct { + pteval_t pte; +} pte_t; + +typedef unsigned long pmdval_t; + +typedef struct { + pmdval_t pmd; +} pmd_t; + +typedef unsigned long pudval_t; + +typedef struct { + pudval_t pud; +} pud_t; + +typedef struct page *pgtable_t; + +struct vm_fault { + struct { + struct vm_area_struct *vma; + gfp_t gfp_mask; + unsigned long pgoff; + unsigned long address; + unsigned long real_address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + union { + pte_t orig_pte; + pmd_t orig_pmd; + }; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +typedef unsigned long vm_flags_t; + +typedef unsigned long pgprotval_t; + +struct pgprot { + pgprotval_t pgprot; +}; + +typedef struct pgprot pgprot_t; + +struct userfaultfd_ctx; + +struct vm_userfaultfd_ctx { + struct userfaultfd_ctx *ctx; +}; + +struct vma_lock; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + union { + struct { + unsigned long vm_start; + unsigned long vm_end; + }; + struct callback_head vm_rcu; + }; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + union { + const vm_flags_t vm_flags; + vm_flags_t __vm_flags; + }; + int vm_lock_seq; + struct vma_lock *vm_lock; + bool detached; + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + unsigned long vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct mempolicy *vm_policy; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +struct vma_lock { + struct rw_semaphore lock; +}; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, unsigned long); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); + vm_fault_t (*map_pages)(struct vm_fault *, unsigned long, unsigned long); + unsigned long (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); + const char *(*name)(struct vm_area_struct *); + int (*set_policy)(struct vm_area_struct 
*, struct mempolicy *); + struct mempolicy *(*get_policy)(struct vm_area_struct *, unsigned long, + unsigned long *); + struct page *(*find_special_page)(struct vm_area_struct *, unsigned long); +}; + +struct swap_iocb; + +struct writeback_control { + long nr_to_write; + long pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate : 1; + unsigned int for_background : 1; + unsigned int tagged_writepages : 1; + unsigned int for_reclaim : 1; + unsigned int range_cyclic : 1; + unsigned int for_sync : 1; + unsigned int unpinned_fscache_wb : 1; + unsigned int no_cgroup_owner : 1; + struct swap_iocb **swap_plug; + struct bdi_writeback *wb; + struct inode *inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct cgroup_subsys_state; + +struct bdi_writeback { + struct backing_dev_info *bdi; + unsigned long state; + unsigned long last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + atomic_t writeback_inodes; + struct percpu_counter stat[4]; + unsigned long bw_time_stamp; + unsigned long dirtied_stamp; + unsigned long written_stamp; + unsigned long write_bandwidth; + unsigned long avg_write_bandwidth; + unsigned long dirty_ratelimit; + unsigned long balanced_dirty_ratelimit; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + struct delayed_work bw_dwork; + unsigned long dirty_sleep; + struct list_head bdi_node; + struct percpu_ref refcnt; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + struct list_head b_attached; + struct list_head offline_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +struct device; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + unsigned long ra_pages; + unsigned long io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + +typedef s64 ktime_t; + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; 
+}; + +struct wakeup_source; + +struct wake_irq; + +struct pm_subsys_data; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup : 1; + unsigned int async_suspend : 1; + bool in_dpm_list : 1; + bool is_prepared : 1; + bool is_suspended : 1; + bool is_noirq_suspended : 1; + bool is_late_suspended : 1; + bool no_pm : 1; + bool early_init : 1; + bool direct_complete : 1; + u32 driver_flags; + spinlock_t lock; + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path : 1; + bool syscore : 1; + bool no_pm_callbacks : 1; + unsigned int must_resume : 1; + unsigned int may_skip_resume : 1; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth : 3; + unsigned int idle_notification : 1; + unsigned int request_pending : 1; + unsigned int deferred_resume : 1; + unsigned int needs_force_resume : 1; + unsigned int runtime_auto : 1; + bool ignore_children : 1; + unsigned int no_callbacks : 1; + unsigned int irq_safe : 1; + unsigned int use_autosuspend : 1; + unsigned int timer_autosuspends : 1; + unsigned int memalloc_noio : 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + enum rpm_status last_status; + int runtime_error; + int autosuspend_delay; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +struct irq_domain; + +struct msi_device_data; + +struct dev_msi_info { + struct irq_domain *domain; + struct msi_device_data *data; +}; + +struct dev_archdata {}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct em_perf_domain; + +struct dma_map_ops; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct io_tlb_mem; + +struct device_node; + +struct fwnode_handle; + +struct class; + +struct iommu_group; + +struct dev_iommu; + +struct device_physical_location; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + const struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct em_perf_domain *em_pd; + struct dev_msi_info msi; + const struct dma_map_ops *dma_ops; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct io_tlb_mem *dma_io_tlb_mem; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + int numa_node; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + const struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + struct device_physical_location *physical_location; + enum device_removable removable; + bool offline_disabled : 1; + bool offline : 1; + bool of_node_reused : 1; + bool state_synced : 1; + bool can_match : 1; +}; + +struct dev_pm_ops; + +struct device_type { + const char 
*name; + const struct attribute_group **groups; + int (*uevent)(const struct device *, struct kobj_uevent_env *); + char *(*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct iommu_ops; + +struct bus_type { + const char *name; + const char *dev_name; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, struct device_driver *); + int (*uevent)(const struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + void (*dma_cleanup)(struct device *); + const struct dev_pm_ops *pm; + const struct iommu_ops *iommu_ops; + bool need_parent_lock; +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + const struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +struct acpi_device_id { + __u8 id[16]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY = 0, + IOMMU_CAP_NOEXEC = 1, + IOMMU_CAP_PRE_BOOT_PROTECTION = 2, + IOMMU_CAP_ENFORCE_CACHE_COHERENCY = 3, + IOMMU_CAP_DEFERRED_FLUSH = 4, + IOMMU_CAP_DIRTY_TRACKING = 5, +}; + +enum iommu_dev_features { + IOMMU_DEV_FEAT_SVA = 0, + IOMMU_DEV_FEAT_IOPF = 1, +}; + +typedef unsigned int ioasid_t; + +struct iommu_domain; + +struct iommu_user_data; + +struct iommu_device; + +struct of_phandle_args; + +struct iommu_fault_event; + +struct iommu_page_response; + +struct iommu_domain_ops; + +struct iommu_ops { 
+ bool (*capable)(struct device *, enum iommu_cap); + void *(*hw_info)(struct device *, u32 *, u32 *); + struct iommu_domain *(*domain_alloc)(unsigned int); + struct iommu_domain *(*domain_alloc_user)(struct device *, u32, struct iommu_domain *, + const struct iommu_user_data *); + struct iommu_domain *(*domain_alloc_paging)(struct device *); + struct iommu_device *(*probe_device)(struct device *); + void (*release_device)(struct device *); + void (*probe_finalize)(struct device *); + struct iommu_group *(*device_group)(struct device *); + void (*get_resv_regions)(struct device *, struct list_head *); + int (*of_xlate)(struct device *, struct of_phandle_args *); + bool (*is_attach_deferred)(struct device *); + int (*dev_enable_feat)(struct device *, enum iommu_dev_features); + int (*dev_disable_feat)(struct device *, enum iommu_dev_features); + int (*page_response)(struct device *, struct iommu_fault_event *, + struct iommu_page_response *); + int (*def_domain_type)(struct device *); + void (*remove_dev_pasid)(struct device *, ioasid_t); + const struct iommu_domain_ops *default_domain_ops; + unsigned long pgsize_bitmap; + struct module *owner; + struct iommu_domain *identity_domain; + struct iommu_domain *blocked_domain; + struct iommu_domain *default_domain; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + unsigned long timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + unsigned long event_count; + unsigned long active_count; + unsigned long relax_count; + unsigned long expire_count; + unsigned long wakeup_count; + struct device *dev; + bool active : 1; + bool autosleep_enabled : 1; +}; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active : 1; + unsigned int in_hrtirq : 1; + unsigned int hang_detected : 1; + unsigned int softirq_activated : 1; + unsigned int nr_events; + unsigned short nr_retries; + unsigned short nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[8]; +}; + +struct pm_domain_data; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + struct pm_domain_data *domain_data; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); + int (*set_performance_state)(struct device *, unsigned int); +}; + +struct em_perf_state; + +struct em_perf_domain { + struct em_perf_state *table; + int nr_perf_states; + unsigned long flags; + unsigned long cpus[0]; +}; + +struct em_perf_state { + unsigned long frequency; + unsigned long power; + unsigned long cost; + unsigned long flags; +}; + +typedef u64 dma_addr_t; + +enum dma_data_direction { + 
DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +typedef u64 phys_addr_t; + +struct sg_table; + +struct scatterlist; + +struct dma_map_ops { + unsigned int flags; + void *(*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, unsigned long); + void (*free)(struct device *, size_t, void *, dma_addr_t, unsigned long); + struct page *(*alloc_pages)(struct device *, size_t, dma_addr_t *, + enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, + enum dma_data_direction); + struct sg_table *(*alloc_noncontiguous)(struct device *, size_t, + enum dma_data_direction, gfp_t, unsigned long); + void (*free_noncontiguous)(struct device *, size_t, struct sg_table *, + enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, + unsigned long); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, + unsigned long); + dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t, + enum dma_data_direction, unsigned long); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, + unsigned long); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, + unsigned long); + void (*unmap_sg)(struct device *, struct scatterlist *, int, + enum dma_data_direction, unsigned long); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, + enum dma_data_direction, unsigned long); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, + enum dma_data_direction, unsigned long); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, + enum dma_data_direction); + void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, + enum dma_data_direction); + void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, + enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, + enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + size_t (*opt_mapping_size)(); + unsigned long (*get_merge_boundary)(struct device *); +}; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; + u64 offset; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + unsigned long segment_boundary_mask; +}; + +struct class { + const char *name; + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); + char *(*devnode)(const struct device *, umode_t *); + void (*class_release)(const struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void *(*namespace)(const struct device *); + void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void *(*grab_current_ns)(); + const void *(*netlink_ns)(struct sock *); + const void *(*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct device_physical_location { + enum device_physical_location_panel panel; + enum device_physical_location_vertical_position 
vertical_position; + enum device_physical_location_horizontal_position horizontal_position; + bool dock; + bool lid; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct cgroup; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; +}; + +struct cgroup_file { + struct kernfs_node *kn; + unsigned long notified_at; + struct timer_list notify_timer; +}; + +struct task_cputime { + u64 stime; + u64 utime; + unsigned long long sum_exec_runtime; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) * effective[28]; + struct hlist_head progs[28]; + u8 flags[28]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + int e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct psi_group; + +struct cgroup { + struct cgroup_subsys_state self; + unsigned long flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + struct cgroup_file psi_files[0]; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state __attribute__((btf_type_tag("rcu"))) * subsys[12]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[12]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu __attribute__((btf_type_tag("percpu"))) * rstat_cpu; + struct list_head rstat_css_list; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group *psi; + struct cgroup_bpf bpf; + atomic_t congestion_count; + struct cgroup_freezer_state freezer; + struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) * bpf_cgrp_storage; + struct cgroup *ancestors[0]; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + unsigned long subdirs; + struct rb_root children; + struct kernfs_root *root; + unsigned long rev; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node __attribute__((btf_type_tag("rcu"))) * open; + loff_t size; + struct kernfs_node *notify_next; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr 
attr; + }; + void *priv; + u64 id; + unsigned short flags; + umode_t mode; + struct kernfs_iattrs *iattr; +}; + +typedef unsigned int __poll_t; + +struct kernfs_open_file; + +struct poll_table_struct; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void *(*seq_start)(struct seq_file *, loff_t *); + void *(*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); + loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped : 1; + bool released : 1; + const struct vm_operations_struct *vm_ops; +}; + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +struct seq_operations { + void *(*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void *(*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +struct kernfs_open_node { + struct callback_head callback_head; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; + unsigned int nr_mmapped; + unsigned int nr_to_release; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct cgroup cgrp; + struct cgroup *cgrp_ancestor_storage; + atomic_t nr_cgrps; + struct list_head root_list; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; +}; + +struct u64_stats_sync {}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat subtree_bstat; + struct cgroup_base_stat last_subtree_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct psi_group {}; + +struct bpf_prog; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + union { + struct bpf_cgroup_storage *cgroup_storage[2]; + u64 bpf_cookie; + }; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct bpf_local_storage_data; + +struct bpf_local_storage_map; + +struct bpf_local_storage { + struct bpf_local_storage_data __attribute__((btf_type_tag("rcu"))) * cache[16]; + struct bpf_local_storage_map __attribute__((btf_type_tag("rcu"))) * smap; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct 
cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init : 1; + bool implicit_on_dfl : 1; + bool threaded : 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct css_set { + struct cgroup_subsys_state *subsys[12]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[12]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct cftype { + char name[64]; + unsigned long private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void *(*seq_start)(struct seq_file *, loff_t *); + void *(*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); +}; + +typedef struct { + unsigned long val; +} swp_entry_t; + +struct folio { + union { + struct { + unsigned long flags; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; + struct address_space *mapping; + unsigned long index; + union { + void *private; + swp_entry_t swap; + }; + atomic_t _mapcount; + atomic_t _refcount; + unsigned long memcg_data; + }; + struct page page; + }; + union { + struct { + unsigned long _flags_1; + unsigned long _head_1; + unsigned long _folio_avail; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + unsigned int _folio_nr_pages; + }; + struct page __page_1; + }; + union { + struct { + unsigned long _flags_2; + unsigned long _head_2; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; + }; + struct { + 
unsigned long _flags_2a; + unsigned long _head_2a; + struct list_head _deferred_list; + }; + struct page __page_2; + }; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + unsigned long _index; + unsigned int _nr_pages; + unsigned int _batch_count; + bool _workingset; + unsigned long _pflags; +}; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long); + void *private; + int ki_flags; + u16 ki_ioprio; + union { + struct wait_page_queue *ki_waitq; + ssize_t (*dio_complete)(void *); + }; +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +struct wait_page_queue { + struct folio *folio; + int bit_nr; + wait_queue_entry_t wait; +}; + +struct iovec { + void __attribute__((btf_type_tag("user"))) * iov_base; + __kernel_size_t iov_len; +}; + +struct kvec; + +struct bio_vec; + +struct iov_iter { + u8 iter_type; + bool copy_mc; + bool nofault; + bool data_source; + size_t iov_offset; + union { + struct iovec __ubuf_iovec; + struct { + union { + const struct iovec *__iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + struct xarray *xarray; + void __attribute__((btf_type_tag("user"))) * ubuf; + }; + size_t count; + }; + }; + union { + unsigned long nr_segs; + loff_t xarray_start; + }; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct swap_cluster_info { + spinlock_t lock; + unsigned int data : 24; + unsigned int flags : 8; +}; + +struct swap_cluster_list { + struct swap_cluster_info head; + struct swap_cluster_info tail; +}; + +struct percpu_cluster; + +struct swap_info_struct { + struct percpu_ref users; + unsigned long flags; + short prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + struct swap_cluster_info *cluster_info; + struct swap_cluster_list free_clusters; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int __attribute__((btf_type_tag("percpu"))) * cluster_next_cpu; + struct percpu_cluster __attribute__((btf_type_tag("percpu"))) * percpu_cluster; + struct rb_root swap_extent_root; + struct bdev_handle *bdev_handle; + struct block_device *bdev; + struct file *swap_file; + unsigned int old_block_size; + struct completion comp; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct swap_cluster_list discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct percpu_cluster { + struct swap_cluster_info index; + unsigned int next; +}; + +struct request_queue; + +struct disk_stats; + +struct blk_holder_ops; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + sector_t bd_nr_sectors; + struct gendisk *bd_disk; + struct request_queue *bd_queue; + struct disk_stats __attribute__((btf_type_tag("percpu"))) * bd_stats; + unsigned long bd_stamp; + bool bd_read_only; + u8 bd_partno; + bool bd_write_holder; + bool bd_has_submit_bio; + dev_t bd_dev; + struct inode *bd_inode; + atomic_t bd_openers; + spinlock_t bd_size_lock; + void *bd_claiming; + void 
*bd_holder; + const struct blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; + int bd_fsfreeze_count; + int bd_holders; + struct kobject *bd_holder_dir; + struct mutex bd_fsfreeze_mutex; + struct super_block *bd_fsfreeze_sb; + struct partition_meta_info *bd_meta_info; + bool bd_ro_warned; + struct device bd_device; +}; + +typedef struct { + atomic_long_t a; +} local_t; + +struct disk_stats { + u64 nsecs[4]; + unsigned long sectors[4]; + unsigned long ios[4]; + unsigned long merges[4]; + unsigned long io_ticks; + local_t in_flight[2]; +}; + +struct blk_holder_ops { + void (*mark_dead)(struct block_device *, bool); + void (*sync)(struct block_device *); +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, + size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct kernel_symbol { + int value_offset; + int name_offset; + int namespace_offset; +}; + +typedef signed char __s8; + +typedef __s8 s8; + +struct kernel_param_ops; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct exception_table_entry { + int insn; + int fixup; + int data; +}; + +struct orc_entry { + s16 sp_offset; + s16 bp_offset; + unsigned int sp_reg : 4; + unsigned int bp_reg : 4; + unsigned int type : 3; + unsigned int signal : 1; +} __attribute__((packed)); + +struct bug_entry { + int bug_addr_disp; + int file_disp; + unsigned short line; + unsigned short flags; +}; + +typedef __u32 Elf64_Word; + +typedef __u16 Elf64_Half; + +typedef __u64 Elf64_Addr; + +typedef __u64 Elf64_Xword; + +struct elf64_sym { + Elf64_Word st_name; + unsigned char st_info; + unsigned char st_other; + Elf64_Half st_shndx; + Elf64_Addr st_value; + Elf64_Xword st_size; +}; + +struct lockdep_map {}; + +struct srcu_data; + +struct srcu_usage; + +struct srcu_struct { + unsigned int srcu_idx; + struct srcu_data __attribute__((btf_type_tag("percpu"))) * sda; + struct lockdep_map dep_map; + struct srcu_usage *srcu_sup; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + unsigned long gp_seq[4]; + atomic_long_t len; + long seglen[4]; + u8 flags; +}; + +struct srcu_node; + +struct srcu_data { + atomic_long_t srcu_lock_count[2]; + atomic_long_t srcu_unlock_count[2]; + int srcu_nmi_safety; + long : 64; + long : 64; + long : 64; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + unsigned long srcu_gp_seq_needed; + unsigned long srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + unsigned long grpmask; + int cpu; + struct srcu_struct *ssp; + long : 64; + long : 64; + long : 64; + long : 
64; + long : 64; + long : 64; +}; + +struct srcu_node { + spinlock_t lock; + unsigned long srcu_have_cbs[4]; + unsigned long srcu_data_have_cbs[4]; + unsigned long srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_usage { + struct srcu_node *node; + struct srcu_node *level[3]; + int srcu_size_state; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + unsigned long srcu_gp_seq; + unsigned long srcu_gp_seq_needed; + unsigned long srcu_gp_seq_needed_exp; + unsigned long srcu_gp_start; + unsigned long srcu_last_gp_end; + unsigned long srcu_size_jiffies; + unsigned long srcu_n_lock_retries; + unsigned long srcu_n_exp_nodelay; + bool sda_is_static; + unsigned long srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + unsigned long reschedule_jiffies; + unsigned long reschedule_count; + struct delayed_work work; + struct srcu_struct *srcu_ssp; +}; + +struct tracepoint; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long : 64; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_call_key; + +struct tracepoint_func; + +struct tracepoint { + const char *name; + struct static_key key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + void *probestub; + int (*regfunc)(); + void (*unregfunc)(); + struct tracepoint_func __attribute__((btf_type_tag("rcu"))) * funcs; +}; + +struct jump_entry { + s32 code; + s32 target; + long key; +}; + +struct static_call_mod; + +struct static_call_key { + void *func; + union { + unsigned long type; + struct static_call_mod *mods; + struct static_call_site *sites; + }; +}; + +struct static_call_mod { + struct static_call_mod *next; + struct module *mod; + struct static_call_site *sites; +}; + +struct static_call_site { + s32 addr; + s32 key; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + unsigned long eval_value; +}; + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno : 18; + unsigned int class_id : 6; + unsigned int flags : 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +}; + +struct ddebug_class_map { + struct list_head link; + struct module *mod; + const char *mod_name; + const char **class_names; + const int length; + const int base; + enum class_map_type map_type; +}; + +struct kstatfs; + +struct dquot; + +struct shrink_control; + +struct super_operations { + struct inode *(*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *, enum freeze_holder); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *, enum 
freeze_holder); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot **(*get_dquots)(struct inode *); + long (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long (*free_cached_objects)(struct super_block *, struct shrink_control *); + void (*shutdown)(struct super_block *); +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + unsigned long dq_flags; + struct mem_dqblk dq_dqb; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + unsigned long nr_to_scan; + unsigned long nr_scanned; + struct mem_cgroup *memcg; +}; + +struct cacheline_padding { + char x[0]; +}; + +struct page_counter { + atomic_long_t usage; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad1_; + unsigned long emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + unsigned long elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + unsigned long watermark; + unsigned long failcnt; + struct cacheline_padding _pad2_; + unsigned long min; + unsigned long low; + unsigned long high; + unsigned long max; + struct page_counter *parent; + long : 64; + long : 64; + long : 64; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct vmpressure { + unsigned long scanned; + unsigned long reclaimed; + unsigned long tree_scanned; + unsigned long tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct mem_cgroup_threshold_ary; + +struct mem_cgroup_thresholds { + struct mem_cgroup_threshold_ary *primary; + struct mem_cgroup_threshold_ary *spare; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + struct fprop_global completions; + struct timer_list period_timer; + unsigned long period_time; + unsigned long dirty_limit_tstamp; + unsigned long dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + u64 at; + struct wb_completion done; +}; + +struct deferred_split { + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +}; + +struct lru_gen_mm_list { + struct list_head fifo; + spinlock_t 
lock; +}; + +struct memcg_vmstats; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct page_counter kmem; + struct page_counter tcpmem; + struct work_struct high_work; + unsigned long zswap_max; + unsigned long soft_limit; + struct vmpressure vmpressure; + bool oom_group; + bool oom_lock; + int under_oom; + int swappiness; + int oom_kill_disable; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct mutex thresholds_lock; + struct mem_cgroup_thresholds thresholds; + struct mem_cgroup_thresholds memsw_thresholds; + struct list_head oom_notify; + unsigned long move_charge_at_immigrate; + spinlock_t move_lock; + unsigned long move_lock_flags; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad1_; + struct memcg_vmstats *vmstats; + atomic_long_t memory_events[9]; + atomic_long_t memory_events_local[9]; + unsigned long socket_pressure; + bool tcpmem_active; + int tcpmem_pressure; + int kmemcg_id; + struct obj_cgroup __attribute__((btf_type_tag("rcu"))) * objcg; + struct obj_cgroup *orig_objcg; + struct list_head objcg_list; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad2_; + atomic_t moving_account; + struct task_struct *move_lock_task; + struct memcg_vmstats_percpu __attribute__((btf_type_tag("percpu"))) * vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct list_head event_list; + spinlock_t event_list_lock; + struct deferred_split deferred_split_queue; + struct lru_gen_mm_list mm_list; + struct mem_cgroup_per_node *nodeinfo[0]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct eventfd_ctx; + +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + unsigned long threshold; +}; + +struct mem_cgroup_threshold_ary { + int current_threshold; + unsigned int size; + struct mem_cgroup_threshold entries[0]; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct memcg_vmstats_percpu { + long state[48]; + unsigned long events[21]; + long state_prev[48]; + unsigned long events_prev[21]; + unsigned long nr_page_events; + unsigned long targets[2]; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +struct lru_gen_folio { + unsigned long max_seq; + unsigned long min_seq[2]; + unsigned long timestamps[4]; + struct list_head folios[40]; + long nr_pages[40]; + unsigned long avg_refaulted[8]; + unsigned long avg_total[8]; + unsigned long protected[6]; + atomic_long_t evicted[8]; + atomic_long_t refaulted[8]; + bool enabled; + u8 gen; + u8 seg; + struct hlist_nulls_node list; +}; + +struct lru_gen_mm_state { + unsigned long seq; + struct list_head *head; + struct list_head *tail; + unsigned long *filters[2]; + unsigned long stats[6]; +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + unsigned long anon_cost; + unsigned long file_cost; + atomic_long_t nonresident_age; + unsigned 
long refaults[2]; + unsigned long flags; + struct lru_gen_folio lrugen; + struct lru_gen_mm_state mm_state; + struct pglist_data *pgdat; +}; + +struct lruvec_stats { + long state[41]; + long state_local[41]; + long state_pending[41]; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + unsigned int generation; +}; + +struct lruvec_stats_percpu; + +struct shrinker_info; + +struct mem_cgroup_per_node { + struct lruvec lruvec; + struct lruvec_stats_percpu __attribute__((btf_type_tag("percpu"))) * lruvec_stats_percpu; + struct lruvec_stats lruvec_stats; + unsigned long lru_zone_size[25]; + struct mem_cgroup_reclaim_iter iter; + struct shrinker_info __attribute__((btf_type_tag("rcu"))) * shrinker_info; + struct rb_node tree_node; + unsigned long usage_in_excess; + bool on_tree; + struct mem_cgroup *memcg; +}; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +struct free_area { + struct list_head free_list[5]; + unsigned long nr_free; +}; + +struct per_cpu_pages; + +struct per_cpu_zonestat; + +struct zone { + unsigned long _watermark[4]; + unsigned long watermark_boost; + unsigned long nr_reserved_highatomic; + long lowmem_reserve[5]; + int node; + struct pglist_data *zone_pgdat; + struct per_cpu_pages __attribute__((btf_type_tag("percpu"))) * per_cpu_pageset; + struct per_cpu_zonestat __attribute__((btf_type_tag("percpu"))) * per_cpu_zonestats; + int pageset_high_min; + int pageset_high_max; + int pageset_batch; + unsigned long zone_start_pfn; + atomic_long_t managed_pages; + unsigned long spanned_pages; + unsigned long present_pages; + unsigned long present_early_pages; + const char *name; + unsigned long nr_isolate_pageblock; + seqlock_t span_seqlock; + int initialized; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad1_; + struct free_area free_area[11]; + struct list_head unaccepted_pages; + unsigned long flags; + spinlock_t lock; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad2_; + unsigned long percpu_drift_mark; + unsigned long compact_cached_free_pfn; + unsigned long compact_cached_migrate_pfn[2]; + unsigned long compact_init_migrate_pfn; + unsigned long compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + long : 0; + struct cacheline_padding _pad3_; + atomic_long_t vm_stat[12]; + atomic_long_t vm_numa_event[6]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[321]; +}; + +struct lru_gen_mm_walk { + struct lruvec *lruvec; + unsigned long max_seq; + unsigned long next_addr; + int nr_pages[40]; + int mm_stats[6]; + int batched; + bool can_swap; + bool force_scan; +}; + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct lru_gen_memcg { + unsigned long seq; + unsigned long nr_memcgs[3]; + struct hlist_nulls_head fifo[24]; + spinlock_t lock; +}; + +struct memory_failure_stats { + unsigned long total; + unsigned long ignored; + unsigned long failed; + unsigned long delayed; + unsigned long recovered; +}; + +struct per_cpu_nodestat; + +struct memory_tier; + +struct pglist_data { + struct zone node_zones[5]; + struct zonelist node_zonelists[2]; + int nr_zones; + spinlock_t node_size_lock; + unsigned long node_start_pfn; + unsigned long node_present_pages; + unsigned long 
node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + wait_queue_head_t reclaim_wait[4]; + atomic_t nr_writeback_throttled; + unsigned long nr_reclaim_start; + struct mutex kswapd_lock; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + bool proactive_compact_trigger; + unsigned long totalreserve_pages; + unsigned long min_unmapped_pages; + unsigned long min_slab_pages; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad1_; + unsigned long first_deferred_pfn; + struct deferred_split deferred_split_queue; + struct lruvec __lruvec; + unsigned long flags; + struct lru_gen_mm_walk mm_walk; + struct lru_gen_memcg memcg_lru; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct cacheline_padding _pad2_; + struct per_cpu_nodestat __attribute__((btf_type_tag("percpu"))) * per_cpu_nodestats; + atomic_long_t vm_stat[41]; + struct memory_tier __attribute__((btf_type_tag("rcu"))) * memtier; + struct memory_failure_stats mf_stats; +}; + +struct per_cpu_pages { + spinlock_t lock; + int count; + int high; + int high_min; + int high_max; + int batch; + u8 flags; + u8 alloc_factor; + u8 expire; + short free_count; + struct list_head lists[13]; + long : 64; + long : 64; +}; + +struct per_cpu_zonestat { + s8 vm_stat_diff[12]; + s8 stat_threshold; + unsigned long vm_numa_event[6]; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[41]; +}; + +struct memory_tier { + struct list_head list; + struct list_head memory_types; + int adistance_start; + struct device dev; + nodemask_t lower_tier_mask; +}; + +struct lruvec_stats_percpu { + long state[41]; + long state_prev[41]; +}; + +struct shrinker_info_unit; + +struct shrinker_info { + struct callback_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[0]; +}; + +struct shrinker_info_unit { + atomic_long_t nr_deferred[64]; + unsigned long map[1]; +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot *(*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t *(*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_info; + +struct qc_dqblk; + +struct qc_state; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int 
i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct qc_dqblk { + int d_fieldmask; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + unsigned long long ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + struct qc_type_state s_state[3]; +}; + +struct fid; + +struct iomap; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry *(*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry *(*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry *(*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + unsigned long flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, + const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, + struct inode *, const char *, const void *, size_t, int); +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +typedef struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) * fsnotify_connp_t; + +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned short type; + unsigned short flags; + __kernel_fsid_t fsid; + union { + fsnotify_connp_t *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, + const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)(struct dentry *, char *, int); + struct vfsmount *(*d_automount)(struct path 
*); + int (*d_manage)(const struct path *, bool); + struct dentry *(*d_real)(struct dentry *, const struct inode *); + long : 64; + long : 64; + long : 64; +}; + +struct shrinker { + unsigned long (*count_objects)(struct shrinker *, struct shrink_control *); + unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *); + long batch; + int seeks; + unsigned int flags; + refcount_t refcount; + struct completion done; + struct callback_head rcu; + void *private_data; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +struct list_lru_one { + struct list_head list; + long nr_items; +}; + +struct list_lru_node { + spinlock_t lock; + struct list_lru_one lru; + long nr_items; + long : 64; + long : 64; + long : 64; +}; + +typedef void *fl_owner_t; + +struct io_comp_batch; + +struct dir_context; + +struct file_lock; + +struct io_uring_cmd; + +struct file_operations { + struct module *owner; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char __attribute__((btf_type_tag("user"))) *, + size_t, loff_t *); + ssize_t (*write)(struct file *, const char __attribute__((btf_type_tag("user"))) *, + size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); + long (*compat_ioctl)(struct file *, unsigned int, unsigned long); + int (*mmap)(struct file *, struct vm_area_struct *); + unsigned long mmap_supported_flags; + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, + unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, + unsigned int); + void (*splice_eof)(struct file *); + int (*setlease)(struct file *, int, struct file_lock **, void **); + long (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, + unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, + unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *, unsigned int); + int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); +}; + +typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +struct mm_cid { + u64 time; + int cid; +}; + +struct ldt_struct { + struct desc_struct *entries; + unsigned int nr_entries; + int slot; +}; + +struct vdso_image { + void *data; + unsigned long size; + unsigned long alt; + unsigned long alt_len; + unsigned long extable_base; + unsigned long extable_len; + const void *extable; + long sym_vvar_start; 
+ long sym_vvar_page; + long sym_pvclock_page; + long sym_hvclock_page; + long sym_timens_page; + long sym_VDSO32_NOTE_MASK; + long sym___kernel_sigreturn; + long sym___kernel_rt_sigreturn; + long sym___kernel_vsyscall; + long sym_int80_landing_pad; + long sym_vdso32_sigreturn_landing_pad; + long sym_vdso32_rt_sigreturn_landing_pad; +}; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx __attribute__((btf_type_tag("rcu"))) * table[0]; +}; + +typedef long long __kernel_time64_t; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long tv_nsec; +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct pollfd { + int fd; + short events; + short revents; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + refcount_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime; + u64 ac_stime; + unsigned long ac_minflt; + unsigned long ac_majflt; +}; + +struct core_state; + +struct tty_struct; + +struct taskstats; + +struct tty_audit_buf; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + int quick_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exec_task; + int group_stop_count; + unsigned int flags; + struct core_state *core_state; + unsigned int is_child_subreaper : 1; + unsigned int has_child_subreaper : 1; + unsigned int next_posix_timer_id; + struct list_head posix_timers; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + atomic_t tick_dep_mask; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + seqlock_t stats_lock; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw; + unsigned long nivcsw; + unsigned long cnvcsw; + unsigned long cnivcsw; + unsigned long min_flt; + unsigned long maj_flt; + unsigned long cmin_flt; + unsigned long cmaj_flt; + unsigned long inblock; + unsigned long oublock; + unsigned long cinblock; + unsigned long coublock; + unsigned long maxrss; + unsigned long cmaxrss; + struct task_io_accounting ioac; + unsigned long long sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + unsigned int audit_tty; + struct tty_audit_buf 
*tty_audit_buf; + bool oom_flag_origin; + short oom_score_adj; + short oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + long : 0; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; + __u64 compact_count; + __u64 compact_delay_total; + __u32 ac_tgid; + __u64 ac_tgetime; + __u64 ac_exe_dev; + __u64 ac_exe_inode; + __u64 wpcopy_count; + __u64 wpcopy_delay_total; + __u64 irq_count; + __u64 irq_delay_total; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t __attribute__((btf_type_tag("user"))) * __sighandler_t; + +typedef void __restorefn_t(); + +typedef __restorefn_t __attribute__((btf_type_tag("user"))) * __sigrestore_t; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +typedef __u32 blk_opf_t; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +} __attribute__((packed)); + +typedef unsigned int blk_qc_t; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct blkcg_gq; + +struct bio_integrity_payload; + +struct bio_set; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + blk_opf_t bi_opf; + unsigned short bi_flags; + unsigned short bi_ioprio; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + blk_qc_t bi_cookie; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + union { + struct bio_integrity_payload *bi_integrity; + }; + unsigned short bi_vcnt; + unsigned short bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + unsigned short bip_vcnt; + unsigned short bip_max_vcnt; + unsigned short bip_flags; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec 
bip_inline_vecs[0]; +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_alloc_cache; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + struct bio_alloc_cache __attribute__((btf_type_tag("percpu"))) * cache; + mempool_t bio_pool; + mempool_t bvec_pool; + mempool_t bio_integrity_pool; + mempool_t bvec_integrity_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; + struct hlist_node cpuhp_dead; +}; + +struct bio_alloc_cache { + struct bio *free_list; + struct bio *free_list_irq; + unsigned int nr; + unsigned int nr_irq; +}; + +struct reclaim_state { + unsigned long reclaimed; + struct lru_gen_mm_walk *mm_walk; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + unsigned short ioprio; + spinlock_t lock; + struct xarray icq_tree; + struct io_cq __attribute__((btf_type_tag("rcu"))) * icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +typedef int __kernel_timer_t; + +union sigval { + int sival_int; + void __attribute__((btf_type_tag("user"))) * sival_ptr; +}; + +typedef union sigval sigval_t; + +typedef __kernel_long_t __kernel_clock_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void __attribute__((btf_type_tag("user"))) * _addr; + union { + int _trapno; + short _addr_lsb; + struct { + char _dummy_bnd[8]; + void __attribute__((btf_type_tag("user"))) * _lower; + void __attribute__((btf_type_tag("user"))) * _upper; + } _addr_bnd; + struct { + char _dummy_pkey[8]; + __u32 _pkey; + } _addr_pkey; + struct { + unsigned long _data; + __u32 _type; + __u32 _flags; + } _perf; + }; + } _sigfault; + struct { + long _band; + int _fd; + } _sigpoll; + struct { + void __attribute__((btf_type_tag("user"))) * _call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; +}; + +struct robust_list { + struct robust_list __attribute__((btf_type_tag("user"))) * next; +}; + +struct robust_list_head { + struct robust_list list; + long futex_offset; + struct robust_list __attribute__((btf_type_tag("user"))) * list_op_pending; +}; + +typedef u32 compat_uptr_t; + +struct compat_robust_list { + compat_uptr_t next; +}; + +typedef s32 compat_long_t; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + +struct 
perf_event_context { + raw_spinlock_t lock; + struct mutex mutex; + struct list_head pmu_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + int nr_events; + int nr_user; + int is_active; + int nr_task_data; + int nr_stat; + int nr_freq; + int rotate_disable; + refcount_t refcount; + struct task_struct *task; + u64 time; + u64 timestamp; + u64 timeoffset; + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + struct callback_head callback_head; + local_t nr_pending; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + __u64 rseq_cs; + __u32 flags; + __u32 node_id; + __u32 mm_cid; + char end[0]; +}; + +struct ftrace_ret_stack { + unsigned long ret; + unsigned long func; + unsigned long long calltime; + unsigned long long subtime; + unsigned long *retp; +}; + +struct arch_uprobe_task { + unsigned long saved_scratch_register; + unsigned int saved_trap_nr; + unsigned int saved_tf; +}; + +struct uprobe; + +struct return_instance; + +struct uprobe_task { + enum uprobe_task_state state; + union { + struct { + struct arch_uprobe_task autask; + unsigned long vaddr; + }; + struct { + struct callback_head dup_xol_work; + unsigned long dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + unsigned long xol_vaddr; + struct return_instance *return_instances; + unsigned int depth; +}; + +struct return_instance { + struct uprobe *uprobe; + unsigned long func; + unsigned long stack; + unsigned long orig_ret_vaddr; + bool chained; + struct return_instance *next; +}; + +typedef struct { + local_t a; +} local64_t; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled : 1; + __u64 inherit : 1; + __u64 pinned : 1; + __u64 exclusive : 1; + __u64 exclude_user : 1; + __u64 exclude_kernel : 1; + __u64 exclude_hv : 1; + __u64 exclude_idle : 1; + __u64 mmap : 1; + __u64 comm : 1; + __u64 freq : 1; + __u64 inherit_stat : 1; + __u64 enable_on_exec : 1; + __u64 task : 1; + __u64 watermark : 1; + __u64 precise_ip : 2; + __u64 mmap_data : 1; + __u64 sample_id_all : 1; + __u64 exclude_host : 1; + __u64 exclude_guest : 1; + __u64 exclude_callchain_kernel : 1; + __u64 exclude_callchain_user : 1; + __u64 mmap2 : 1; + __u64 comm_exec : 1; + __u64 use_clockid : 1; + __u64 context_switch : 1; + __u64 write_backward : 1; + __u64 namespaces : 1; + __u64 ksymbol : 1; + __u64 bpf_event : 1; + __u64 aux_output : 1; + __u64 cgroup : 1; + __u64 text_poke : 1; + __u64 build_id : 1; + __u64 inherit_thread : 1; + __u64 remove_on_exec : 1; + __u64 sigtrap : 1; + __u64 __reserved_1 : 26; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + __u32 __reserved_3; + __u64 sig_data; + __u64 config3; +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; +}; + +struct arch_hw_breakpoint { + unsigned long address; + unsigned long mask; + u8 len; + u8 type; +}; + +struct rhash_head { + struct rhash_head 
__attribute__((btf_type_tag("rcu"))) * next; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head __attribute__((btf_type_tag("rcu"))) * next; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + unsigned long config_base; + unsigned long event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + struct arch_hw_breakpoint info; + struct rhlist_head bp_list; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + unsigned long addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); + struct rcuwait irqwait; +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, + struct pt_regs *); + +struct ftrace_ops; + +struct ftrace_regs; + +typedef void (*ftrace_func_t)(unsigned long, unsigned long, struct ftrace_ops *, + struct ftrace_regs *); + +struct ftrace_hash; + +struct ftrace_ops_hash { + struct ftrace_hash __attribute__((btf_type_tag("rcu"))) * notrace_hash; + struct ftrace_hash __attribute__((btf_type_tag("rcu"))) * filter_hash; + struct mutex regex_lock; +}; + +typedef int (*ftrace_ops_func_t)(struct ftrace_ops *, enum ftrace_ops_cmd); + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops __attribute__((btf_type_tag("rcu"))) * next; + unsigned long flags; + void *private; + ftrace_func_t saved_func; + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + unsigned long trampoline; + unsigned long trampoline_size; + struct list_head list; + ftrace_ops_func_t ops_func; + unsigned long direct_call; +}; + +struct pmu; + +struct perf_event_pmu_context; + +struct perf_buffer; + +struct fasync_struct; + +struct perf_addr_filter_range; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + struct perf_event_pmu_context *pmu_ctx; + atomic_long_t refcount; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event 
*parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + unsigned long rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + unsigned int pending_sigtrap; + unsigned long pending_addr; + struct irq_work pending_irq; + struct callback_head pending_task; + unsigned int pending_work; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + unsigned long addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + atomic64_t lost_samples; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + perf_overflow_handler_t orig_overflow_handler; + struct bpf_prog *prog; + u64 bpf_cookie; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct ftrace_ops ftrace_ops; + struct perf_cgroup *cgrp; + struct list_head sb_list; + __u32 orig_type; +}; + +struct perf_cpu_pmu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + struct device *parent; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + int __attribute__((btf_type_tag("percpu"))) * pmu_disable_count; + struct perf_cpu_pmu_context __attribute__((btf_type_tag("percpu"))) * cpu_pmu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_pmu_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + void *(*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, unsigned long); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + bool (*filter)(struct pmu *, int); + int (*check_period)(struct perf_event *, u64); +}; + +struct perf_event_pmu_context { + struct pmu *pmu; + struct perf_event_context *ctx; + struct list_head pmu_ctx_entry; + struct list_head pinned_active; + struct list_head flexible_active; + unsigned int embedded : 1; + unsigned int nr_events; + unsigned int nr_cgroups; + atomic_t refcount; + struct callback_head callback_head; + void *task_ctx_data; + int rotate_necessary; +}; + +struct perf_cpu_pmu_context { + struct perf_event_pmu_context epc; + struct perf_event_pmu_context *task_epc; + 
struct list_head sched_cb_entry; + int sched_cb_usage; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + unsigned long wakeup; + unsigned long size; + u64 aux_flags; + union { + void *addr; + unsigned long head; + }; + int page; +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +struct perf_addr_filter_range { + unsigned long start; + unsigned long size; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; + }; +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_op : 5; + __u64 mem_lvl : 14; + __u64 mem_snoop : 5; + __u64 mem_lock : 2; + __u64 mem_dtlb : 7; + __u64 mem_lvl_num : 4; + __u64 mem_remote : 1; + __u64 mem_snoopx : 2; + __u64 mem_blk : 3; + __u64 mem_hops : 3; + __u64 mem_rsvd : 18; + }; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + +struct perf_callchain_entry; + +struct perf_raw_record; + +struct perf_branch_stack; + +struct perf_sample_data { + u64 sample_flags; + u64 period; + u64 dyn_size; + u64 type; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 ip; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + union perf_sample_weight weight; + union perf_mem_data_src data_src; + u64 txn; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 stream_id; + u64 cgroup; + u64 addr; + u64 phys_addr; + u64 data_page_size; + u64 code_page_size; + u64 aux_size; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +typedef unsigned long (*perf_copy_f)(void *, const void *, unsigned long, unsigned long); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + unsigned long pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __attribute__((packed)); + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred : 1; + __u64 predicted : 1; + __u64 in_tx : 1; + __u64 abort : 1; + __u64 cycles : 16; + __u64 type : 4; + __u64 spec : 2; + __u64 new_type : 4; + __u64 priv : 3; + __u64 reserved : 31; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct ftrace_regs { + struct pt_regs regs; +}; + +struct ftrace_hash { + unsigned long size_bits; + struct hlist_head *buckets; + unsigned long count; + unsigned long flags; + struct callback_head rcu; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info __attribute__((btf_type_tag("percpu"))) * info; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; + u64 timeoffset; + int active; +}; + +struct math_emu_info { + long ___orig_eip; + struct pt_regs *regs; +}; + +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); + +struct ldttss_desc { + u16 limit0; + u16 base0; + u16 base1 : 8; + u16 type : 5; + u16 dpl : 2; + u16 p : 1; + u16 limit1 : 4; + u16 zero0 : 3; + u16 g : 1; + u16 base2 : 8; + u32 base3; + u32 zero1; +}; + +typedef struct ldttss_desc tss_desc; + 
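Two conventions recur throughout these dumped definitions: pointer members carry __attribute__((btf_type_tag(...))) annotations (these appear to be the kernel's __rcu, __percpu and __user markers preserved through BTF), and anonymous "long : 64;" bit-fields stand in for layout padding rather than named members. The snippet below is a minimal, hypothetical sketch of both conventions; example_item and example_list are made-up names, not kernel types, and it assumes a compiler that recognizes the btf_type_tag attribute (recent Clang), while other compilers will typically only warn about it.

/*
 * Minimal sketch (hypothetical names, not kernel types): shows the
 * btf_type_tag pointer annotation and the anonymous `long : 64;`
 * padding bit-field used by the dumped definitions in this file.
 */
#include <stdio.h>
#include <stddef.h>

#define example_rcu __attribute__((btf_type_tag("rcu")))

struct example_item;

struct example_list {
	struct example_item example_rcu *head; /* tagged pointer, as in the dump */
	long : 64;                             /* unnamed 64-bit padding slot */
	unsigned int count;
};

int main(void)
{
	/* The padding slot has no name, but it still occupies space. */
	printf("sizeof=%zu offsetof(count)=%zu\n",
	       sizeof(struct example_list),
	       offsetof(struct example_list, count));
	return 0;
}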
+struct x86_hw_tss { + u32 reserved1; + u64 sp0; + u64 sp1; + u64 sp2; + u64 reserved2; + u64 ist[7]; + u32 reserved3; + u32 reserved4; + u16 reserved5; + u16 io_bitmap_base; +} __attribute__((packed)); + +struct entry_stack { + char stack[4096]; +}; + +struct entry_stack_page { + struct entry_stack stack; +}; + +struct x86_io_bitmap { + u64 prev_sequence; + unsigned int prev_max; + unsigned long bitmap[1025]; + unsigned long mapall[1025]; +}; + +struct tss_struct { + struct x86_hw_tss x86_tss; + struct x86_io_bitmap io_bitmap; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + 
long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct cea_exception_stacks { + char DF_stack_guard[4096]; + char DF_stack[8192]; + char NMI_stack_guard[4096]; + char 
NMI_stack[8192]; + char DB_stack_guard[4096]; + char DB_stack[8192]; + char MCE_stack_guard[4096]; + char MCE_stack[8192]; + char VC_stack_guard[4096]; + char VC_stack[8192]; + char VC2_stack_guard[4096]; + char VC2_stack[8192]; + char IST_top_guard[4096]; +}; + +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[48]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 
64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct debug_store_buffers { + char bts_buffer[65536]; + char pebs_buffer[65536]; +}; + +struct cpu_entry_area { + char gdt[4096]; + struct entry_stack_page entry_stack_page; + struct tss_struct tss; + struct cea_exception_stacks estacks; + struct debug_store cpu_debug_store; + struct debug_store_buffers cpu_debug_buffers; +}; + +struct msr_enumeration { + u32 msr_no; + u32 feature; +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 
0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type *fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + enum fs_context_purpose purpose : 8; + enum fs_context_phase phase : 8; + bool need_free : 1; + bool global : 1; + bool oldapi : 1; + bool exclusive : 1; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct filename; + +struct fs_parameter { + const char *key; + enum fs_value_type type : 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct audit_names; + +struct filename { + const char *name; + const char __attribute__((btf_type_tag("user"))) * uptr; + atomic_t refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + unsigned long min_coredump; +}; + +struct linux_binprm { + struct vm_area_struct *vma; + unsigned long vma_pages; + struct mm_struct *mm; + unsigned long p; + unsigned long argmin; + unsigned int have_execfd : 1; + unsigned int execfd_creds : 1; + unsigned int secureexec : 1; + unsigned int point_of_no_return : 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + unsigned long loader; + unsigned long exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + unsigned long num_children; + unsigned long num_active_vmas; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +struct mempolicy { + atomic_t refcnt; + unsigned short mode; + unsigned short flags; + nodemask_t nodes; + int home_node; + union { + nodemask_t cpuset_mems_allowed; + nodemask_t user_nodemask; + } w; +}; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct 
ref_tracker_dir {}; + +struct raw_notifier_head { + struct notifier_block __attribute__((btf_type_tag("rcu"))) * head; +}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + u8 sysctl_txrehash; + struct prot_inuse __attribute__((btf_type_tag("percpu"))) * prot_inuse; + struct cpumask *rps_default_mask; +}; + +struct ipstats_mib; + +struct tcp_mib; + +struct linux_mib; + +struct udp_mib; + +struct mptcp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct proc_dir_entry; + +struct netns_mib { + struct ipstats_mib __attribute__((btf_type_tag("percpu"))) * ip_statistics; + struct ipstats_mib __attribute__((btf_type_tag("percpu"))) * ipv6_statistics; + struct tcp_mib __attribute__((btf_type_tag("percpu"))) * tcp_statistics; + struct linux_mib __attribute__((btf_type_tag("percpu"))) * net_statistics; + struct udp_mib __attribute__((btf_type_tag("percpu"))) * udp_statistics; + struct udp_mib __attribute__((btf_type_tag("percpu"))) * udp_stats_in6; + struct mptcp_mib __attribute__((btf_type_tag("percpu"))) * mptcp_statistics; + struct udp_mib __attribute__((btf_type_tag("percpu"))) * udplite_statistics; + struct udp_mib __attribute__((btf_type_tag("percpu"))) * udplite_stats_in6; + struct icmp_mib __attribute__((btf_type_tag("percpu"))) * icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib __attribute__((btf_type_tag("percpu"))) * icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct unix_table { + spinlock_t *locks; + struct hlist_head *buckets; +}; + +struct netns_unix { + struct unix_table table; + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block __attribute__((btf_type_tag("rcu"))) * head; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + refcount_t tw_refcount; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct local_ports { + seqlock_t lock; + int range[2]; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct udp_table; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct fib_rules_ops; + +struct fib_table; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + struct inet_timewait_death_row tcp_death_row; + struct udp_table *udp_table; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain __attribute__((btf_type_tag("rcu"))) * ra_chain; + struct mutex ra_mutex; + struct fib_rules_ops *rules_ops; + struct fib_table __attribute__((btf_type_tag("rcu"))) * fib_main; + struct fib_table 
__attribute__((btf_type_tag("rcu"))) * fib_default; + unsigned int fib_rules_require_fldissect; + bool fib_has_custom_rules; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + u32 ip_rt_min_pmtu; + int ip_rt_mtu_expires; + int ip_rt_min_advmss; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_use_pmtu; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 sysctl_ip_early_demux; + u8 sysctl_raw_l3mdev_accept; + u8 sysctl_tcp_early_demux; + u8 sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_l3mdev_accept; + u8 sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + int sysctl_tcp_reordering; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_moderate_rcvbuf; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_challenge_ack_limit; + int sysctl_tcp_min_rtt_wlen; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + u8 sysctl_tcp_reflect_tos; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + int sysctl_tcp_wmem[3]; + int sysctl_tcp_rmem[3]; + unsigned int sysctl_tcp_child_ehash_entries; + unsigned long sysctl_tcp_comp_sack_delay_ns; + unsigned long sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops __attribute__((btf_type_tag("rcu"))) * + tcp_congestion_control; + struct tcp_fastopen_context __attribute__((btf_type_tag("rcu"))) * tcp_fastopen_ctx; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + unsigned long tfo_active_disable_stamp; + u32 tcp_challenge_timestamp; + u32 
tcp_challenge_count; + u8 sysctl_tcp_plb_enabled; + u8 sysctl_tcp_plb_idle_rehash_rounds; + u8 sysctl_tcp_plb_rehash_rounds; + u8 sysctl_tcp_plb_suspend_rto_sec; + int sysctl_tcp_plb_cong_thresh; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; + u8 sysctl_udp_l3mdev_accept; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + unsigned int sysctl_udp_child_hash_entries; + unsigned long *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct list_head mr_tables; + struct fib_rules_ops *mr_rules_ops; + u32 sysctl_fib_multipath_hash_fields; + u8 sysctl_fib_multipath_use_neigh; + u8 sysctl_fib_multipath_hash_policy; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + siphash_key_t ip_id_key; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct dst_entry; + +struct net_device; + +struct sk_buff; + +struct neighbour; + +struct dst_ops { + unsigned short family; + unsigned int gc_thresh; + void (*gc)(struct dst_ops *); + struct dst_entry *(*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 *(*cow_metrics)(struct dst_entry *, unsigned long); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *); + struct dst_entry *(*negative_advice)(struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour *(*neigh_lookup)(const struct dst_entry *, struct sk_buff *, + const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + struct percpu_counter pcpuc_entries; + long : 64; + long : 64; + long : 64; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + u32 multipath_hash_fields; + u8 multipath_hash_policy; + u8 bindv6only; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + unsigned long icmpv6_ratemask[4]; + unsigned long *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; + u8 skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + u8 icmpv6_error_anycast_as_unicast; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct ioam6_pernet_data; + +struct 
netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + atomic_t ip6_rt_gc_expire; + unsigned long ip6_rt_last_gc; + unsigned char flowlabel_has_excl; + bool fib6_has_custom_rules; + unsigned int fib6_rules_require_fldissect; + unsigned int fib6_routes_require_src; + struct rt6_info *ip6_prohibit_entry; + struct rt6_info *ip6_blk_hole_entry; + struct fib6_table *fib6_local_tbl; + struct fib_rules_ops *fib6_rules_ops; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + struct list_head mr6_tables; + struct fib_rules_ops *mr6_rules_ops; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; + struct ioam6_pernet_data *ioam6_data; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_logger __attribute__((btf_type_tag("rcu"))) * nf_loggers[11]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) * hooks_ipv4[5]; + struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) * hooks_ipv6[5]; + struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) * hooks_arp[3]; + struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) * hooks_bridge[5]; + unsigned int defrag_ipv4_users; + unsigned int defrag_ipv6_users; +}; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_dccp_net { + u8 dccp_loose; + unsigned int dccp_timeout[10]; +}; + +struct nf_sctp_net { + unsigned int timeouts[10]; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; + struct nf_dccp_net dccp; + struct nf_sctp_net sctp; +}; + +struct ip_conntrack_stat; + +struct nf_ct_event_notifier; + +struct netns_ct { + bool ecache_dwork_pending; + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ip_conntrack_stat __attribute__((btf_type_tag("percpu"))) * stat; + struct nf_ct_event_notifier __attribute__((btf_type_tag("rcu"))) * nf_conntrack_event_cb; + struct nf_ip_net nf_ct_proto; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct sk_buff_list { + struct sk_buff *next; + struct sk_buff *prev; +}; + +struct sk_buff_head { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + }; + struct sk_buff_list list; + }; + __u32 qlen; + spinlock_t lock; +}; + +struct 
netns_bpf { + struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) * run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct netns_ipvs; + +struct netns_xdp { + struct mutex lock; + struct hlist_head list; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + atomic_t dev_unreg_count; + unsigned int dev_base_seq; + u32 ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; + struct ref_tracker_dir notrefcnt_tracker; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct xarray dev_by_index; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_nf nf; + struct netns_ct ct; + struct netns_nftables nft; + struct sk_buff_head wext_nlevents; + struct net_generic __attribute__((btf_type_tag("rcu"))) * gen; + struct netns_bpf bpf; + u64 net_cookie; + struct netns_ipvs *ipvs; + struct netns_xdp xdp; + struct sock *crypto_nlsk; + struct sock *diag_nlsk; + long : 64; + long : 64; +}; + +struct prot_inuse { + int all; + int val[64]; +}; + +struct ipstats_mib { + u64 mibs[38]; + struct u64_stats_sync syncp; +}; + +struct tcp_mib { + unsigned long mibs[16]; +}; + +struct linux_mib { + unsigned long mibs[132]; +}; + +struct udp_mib { + unsigned long mibs[10]; +}; + +struct mptcp_mib { + unsigned long mibs[58]; +}; + +struct icmp_mib { + unsigned long mibs[30]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + unsigned long mibs[7]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct ip_ra_chain { + struct ip_ra_chain __attribute__((btf_type_tag("rcu"))) * next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + unsigned long *tb_data; + unsigned long __data[0]; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +struct rhashtable_compare_arg; + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table __attribute__((btf_type_tag("rcu"))) * tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + 
spinlock_t lock; + atomic_t nelems; +}; + +struct inet_frags; + +struct fqdir { + long high_thresh; + long low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long : 64; + long : 64; + struct rhashtable rhashtable; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long : 64; + long : 64; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +typedef __u32 __be32; + +typedef __u16 __be16; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + u8 mono_delivery_time; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +typedef __u32 __wsum; + +typedef unsigned int sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + unsigned long dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + struct llist_node ll_node; + }; + union { + struct sock *sk; + int ip_defrag_offset; + }; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + unsigned long _sk_redir; + }; + unsigned long _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned : 1; + __u8 nohdr : 1; + __u8 fclone : 2; + __u8 peeked : 1; + __u8 head_frag : 1; + __u8 pfmemalloc : 1; + __u8 pp_recycle : 1; + __u8 active_extensions; + union { + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type : 3; + __u8 ignore_df : 1; + __u8 dst_pending_confirm : 1; + __u8 ip_summed : 2; + __u8 ooo_okay : 1; + __u8 __mono_tc_offset[0]; + __u8 mono_delivery_time : 1; + __u8 tc_at_ingress : 1; + __u8 tc_skip_classify : 1; + __u8 remcsum_offload : 1; + __u8 csum_complete_sw : 1; + __u8 csum_level : 2; + __u8 inner_protocol_type : 1; + __u8 l4_hash : 1; + __u8 sw_hash : 1; + __u8 wifi_acked_valid : 1; + __u8 wifi_acked : 1; + __u8 no_fcs : 1; + __u8 encapsulation : 1; + __u8 encap_hdr_csum : 1; + __u8 csum_valid : 1; + __u8 ipvs_property : 1; + __u8 nf_trace : 1; + __u8 redirected : 1; + __u8 slow_gro : 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + 
__be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + __u32 secmark; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + }; + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type : 3; + __u8 ignore_df : 1; + __u8 dst_pending_confirm : 1; + __u8 ip_summed : 2; + __u8 ooo_okay : 1; + __u8 __mono_tc_offset[0]; + __u8 mono_delivery_time : 1; + __u8 tc_at_ingress : 1; + __u8 tc_skip_classify : 1; + __u8 remcsum_offload : 1; + __u8 csum_complete_sw : 1; + __u8 csum_level : 2; + __u8 inner_protocol_type : 1; + __u8 l4_hash : 1; + __u8 sw_hash : 1; + __u8 wifi_acked_valid : 1; + __u8 wifi_acked : 1; + __u8 no_fcs : 1; + __u8 encapsulation : 1; + __u8 encap_hdr_csum : 1; + __u8 csum_valid : 1; + __u8 ipvs_property : 1; + __u8 nf_trace : 1; + __u8 redirected : 1; + __u8 slow_gro : 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + __u32 secmark; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + } headers; + }; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[2]; + u8 chunks; + long : 0; + char data[0]; +}; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table __attribute__((btf_type_tag("rcu"))) * future_tbl; + struct lockdep_map dep_map; + long : 64; + struct rhash_lock_head __attribute__((btf_type_tag("rcu"))) * buckets[0]; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, + CA_EVENT_TLP_RECOVERY = 6, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*tso_segs)(struct sock *, unsigned int); + void (*skb_marked_lost)(struct sock *, const struct sk_buff *); + void (*cong_control)(struct sock *, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long : 64; + long 
: 64; + long : 64; + long : 64; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; +}; + +typedef struct { + atomic_t refcnt; +} rcuref_t; + +typedef struct { +} netdevice_tracker; + +struct uncached_list; + +struct lwtunnel_state; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + unsigned long _metrics; + unsigned long expires; + void *__pad1; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + unsigned short flags; + short obsolete; + unsigned short header_len; + unsigned short trailer_len; + rcuref_t __rcuref; + int __use; + unsigned long lastuse; + struct callback_head callback_head; + short error; + short __pad; + __u32 tclassid; + netdevice_tracker dev_tracker; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; + struct lwtunnel_state *lwtstate; +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, + const struct net_device *, const struct net_device *, + const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; + unsigned int chaintoolong; +}; + +struct nf_ct_event; + +struct nf_exp_event; + +struct nf_ct_event_notifier { + int (*ct_event)(unsigned int, const struct nf_ct_event *); + int (*exp_event)(unsigned int, const struct nf_exp_event *); +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + struct { + struct { + } __empty_ptr; + void *ptr[0]; + }; + }; +}; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +}; + +typedef unsigned int blk_mode_t; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct cdrom_device_info; + +struct badblocks; + +struct blk_independent_access_ranges; + +struct gendisk { + int major; + int first_minor; + int minors; + char disk_name[32]; + unsigned short events; + unsigned short event_flags; + struct xarray part_tbl; + struct block_device *part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + struct bio_set bio_split; + int flags; + unsigned long state; + struct mutex open_mutex; + unsigned int open_partitions; + struct backing_dev_info *bdi; + struct kobject queue_kobj; + struct kobject *slave_dir; + struct list_head slave_bdevs; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + struct cdrom_device_info *cdi; + int node_id; + struct badblocks *bb; + 
struct lockdep_map lockdep_map; + u64 diskseq; + blk_mode_t open_mode; + struct blk_independent_access_ranges *ia_ranges; +}; + +struct blk_zone; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +enum blk_unique_id { + BLK_UID_T10 = 1, + BLK_UID_EUI64 = 2, + BLK_UID_NAA = 3, +}; + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + void (*submit_bio)(struct bio *); + int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); + int (*open)(struct gendisk *, blk_mode_t); + void (*release)(struct gendisk *); + int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, unsigned long); + int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, unsigned long); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *, bool); + void (*free_disk)(struct gendisk *); + void (*swap_slot_free_notify)(struct block_device *, unsigned long); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char *(*devnode)(struct gendisk *, umode_t *); + int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); + struct module *owner; + const struct pr_ops *pr_ops; + int (*alternative_gpt_sector)(struct gendisk *, sector_t *); +}; + +struct request; + +struct io_comp_batch { + struct request *req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_keys; + +struct pr_held_reservation; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); + int (*pr_read_keys)(struct block_device *, struct pr_keys *); + int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); +}; + +struct blk_integrity_profile; + +struct blk_integrity { + const struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +enum blk_bounce { + BLK_BOUNCE_NONE = 0, + BLK_BOUNCE_HIGH = 1, +}; + +enum blk_zoned_model { + BLK_ZONED_NONE = 0, + BLK_ZONED_HA = 1, + BLK_ZONED_HM = 2, +}; + +struct queue_limits { + enum blk_bounce bounce; + unsigned long seg_boundary_mask; + unsigned long virt_boundary_mask; + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_user_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_secure_erase_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + 
unsigned int discard_alignment; + unsigned int zone_write_granularity; + unsigned short max_segments; + unsigned short max_integrity_segments; + unsigned short max_discard_segments; + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; + unsigned int dma_alignment; +}; + +struct elevator_queue; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_mq_tags; + +struct blk_trace; + +struct blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + struct request *last_merge; + struct elevator_queue *elevator; + struct percpu_ref q_usage_counter; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + struct mutex rq_qos_mutex; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx __attribute__((btf_type_tag("percpu"))) * queue_ctx; + unsigned int queue_depth; + struct xarray hctx_table; + unsigned int nr_hw_queues; + void *queuedata; + unsigned long queue_flags; + atomic_t pm_only; + int id; + spinlock_t queue_lock; + struct gendisk *disk; + refcount_t refs; + struct kobject *mq_kobj; + struct blk_integrity integrity; + struct device *dev; + enum rpm_status rpm_status; + unsigned long nr_requests; + unsigned int dma_pad_mask; + unsigned int rq_timeout; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_tags; + struct blk_mq_tags *sched_shared_tags; + struct list_head icq_list; + unsigned long blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct mutex blkcg_mutex; + struct queue_limits limits; + unsigned int required_elevator_features; + int node; + struct blk_trace __attribute__((btf_type_tag("rcu"))) * blk_trace; + struct blk_flush_queue *fq; + struct list_head flush_list; + struct list_head requeue_list; + spinlock_t requeue_lock; + struct delayed_work requeue_work; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + int quiesce_depth; + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + struct mutex debugfs_mutex; + bool mq_sysfs_init_done; +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_hw_ctx; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + void (*queue_rqs)(struct request **); + int (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + void (*set_rq_budget_token)(struct request *, int); + int (*get_rq_budget_token)(struct request *); + enum blk_eh_timer_return (*timeout)(struct request *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, + unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + void 
(*map_queues)(struct blk_mq_tag_set *); + void (*show_rq)(struct seq_file *, struct request *); +}; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long : 64; + }; + unsigned int cpu; + unsigned short index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long : 64; +}; + +struct blk_integrity_iter; + +typedef blk_status_t integrity_processing_fn(struct blk_integrity_iter *); + +typedef void integrity_prepare_fn(struct request *); + +typedef void integrity_complete_fn(struct request *, unsigned int); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; + const char *name; +}; + +typedef u32 phandle; + +struct fwnode_operations; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct property; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + unsigned long _flags; + void *data; +}; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle *(*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void *(*device_get_match_data)(const struct fwnode_handle *, + const struct device *); + bool (*device_dma_supported)(const struct fwnode_handle *); + enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, + unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, + const char **, size_t); + const char *(*get_name)(const struct fwnode_handle *); + const char *(*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle *(*get_parent)(const struct fwnode_handle *); + struct fwnode_handle *(*get_next_child_node)(const struct fwnode_handle *, + struct fwnode_handle *); + struct fwnode_handle *(*get_named_child_node)(const struct fwnode_handle *, + const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, + unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle *(*graph_get_next_endpoint)(const struct fwnode_handle *, + struct fwnode_handle *); + struct fwnode_handle *(*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle *(*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + void *(*iomap)(struct fwnode_handle *, int); + int (*irq_get)(const struct fwnode_handle *, unsigned int); + int (*add_links)(struct fwnode_handle *); +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct 
fwnode_handle *local_fwnode; +}; + +struct property { + char *name; + int length; + void *value; + struct property *next; +}; + +struct rchan; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + unsigned long __attribute__((btf_type_tag("percpu"))) * sequence; + unsigned char __attribute__((btf_type_tag("percpu"))) * msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct list_head running_list; + atomic_t dropped; +}; + +struct blk_independent_access_range { + struct kobject kobj; + sector_t sector; + sector_t nr_sectors; +}; + +struct blk_independent_access_ranges { + struct kobject kobj; + bool sysfs_registered; + unsigned int nr_ia_ranges; + struct blk_independent_access_range ia_range[0]; +}; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +struct blk_plug { + struct request *mq_list; + struct request *cached_rq; + unsigned short nr_ios; + unsigned short rq_count; + bool multiple_queues; + bool has_elevator; + struct list_head cb_list; +}; + +struct task_delay_info { + raw_spinlock_t lock; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_start; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u64 compact_start; + u64 compact_delay; + u64 wpcopy_start; + u64 wpcopy_delay; + u64 irq_delay; + u32 freepages_count; + u32 thrashing_count; + u32 compact_count; + u32 wpcopy_count; + u32 irq_count; +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + unsigned long size; + unsigned long flags; + struct page **pages; + unsigned int page_order; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + struct event_filter *filter; + union { + void *module; + atomic_t refcnt; + }; + void *data; + int flags; + int perf_refcount; + struct hlist_head __attribute__((btf_type_tag("percpu"))) * perf_events; + struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) * prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head *(*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + const int len; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +struct trace_iterator; + +typedef enum print_line_t 
(*trace_print_func)(struct trace_iterator *, int, + struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; +}; + +struct trace_seq { + char buffer[4096]; + struct seq_buf seq; + size_t readpos; + int full; +}; + +typedef struct cpumask cpumask_var_t[1]; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_entry; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + unsigned long iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + long wait_index; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + unsigned long lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long idx; +}; + +struct trace_entry { + unsigned short type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_parse_result { + bool negated; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + }; +}; + +struct bdev_handle { + struct block_device *bdev; + void *holder; + blk_mode_t mode; +}; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef int (*initcall_t)(); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_FREEING_INITMEM = 2, + SYSTEM_RUNNING = 3, + SYSTEM_HALT = 4, + SYSTEM_POWER_OFF = 5, + SYSTEM_RESTART = 6, + SYSTEM_SUSPEND = 7, +}; + +typedef int initcall_entry_t; + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_APB_DEAD = 8, + CPUHP_X86_MCE_DEAD = 9, + CPUHP_VIRT_NET_DEAD = 10, + CPUHP_IBMVNIC_DEAD = 11, + CPUHP_SLUB_DEAD = 12, + CPUHP_DEBUG_OBJ_DEAD = 13, + CPUHP_MM_WRITEBACK_DEAD = 14, + CPUHP_MM_DEMOTION_DEAD = 15, + CPUHP_MM_VMSTAT_DEAD = 16, + CPUHP_SOFTIRQ_DEAD = 17, + CPUHP_NET_MVNETA_DEAD = 18, + CPUHP_CPUIDLE_DEAD = 19, + CPUHP_ARM64_FPSIMD_DEAD = 20, + CPUHP_ARM_OMAP_WAKE_DEAD = 21, + CPUHP_IRQ_POLL_DEAD = 22, + CPUHP_BLOCK_SOFTIRQ_DEAD = 23, + CPUHP_BIO_DEAD = 24, + CPUHP_ACPI_CPUDRV_DEAD = 25, + CPUHP_S390_PFAULT_DEAD = 26, + CPUHP_BLK_MQ_DEAD = 27, + CPUHP_FS_BUFF_DEAD = 28, + CPUHP_PRINTK_DEAD = 29, + CPUHP_MM_MEMCQ_DEAD = 30, + CPUHP_PERCPU_CNT_DEAD = 31, + CPUHP_RADIX_DEAD = 32, + CPUHP_PAGE_ALLOC = 33, + CPUHP_NET_DEV_DEAD = 34, + CPUHP_PCI_XGENE_DEAD = 35, + CPUHP_IOMMU_IOVA_DEAD = 36, + CPUHP_LUSTRE_CFS_DEAD = 37, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 38, + CPUHP_PADATA_DEAD = 39, + CPUHP_AP_DTPM_CPU_DEAD = 40, + CPUHP_RANDOM_PREPARE = 41, 
+ CPUHP_WORKQUEUE_PREP = 42, + CPUHP_POWER_NUMA_PREPARE = 43, + CPUHP_HRTIMERS_PREPARE = 44, + CPUHP_PROFILE_PREPARE = 45, + CPUHP_X2APIC_PREPARE = 46, + CPUHP_SMPCFD_PREPARE = 47, + CPUHP_RELAY_PREPARE = 48, + CPUHP_SLAB_PREPARE = 49, + CPUHP_MD_RAID5_PREPARE = 50, + CPUHP_RCUTREE_PREP = 51, + CPUHP_CPUIDLE_COUPLED_PREPARE = 52, + CPUHP_POWERPC_PMAC_PREPARE = 53, + CPUHP_POWERPC_MMU_CTX_PREPARE = 54, + CPUHP_XEN_PREPARE = 55, + CPUHP_XEN_EVTCHN_PREPARE = 56, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 57, + CPUHP_SH_SH3X_PREPARE = 58, + CPUHP_NET_FLOW_PREPARE = 59, + CPUHP_TOPOLOGY_PREPARE = 60, + CPUHP_NET_IUCV_PREPARE = 61, + CPUHP_ARM_BL_PREPARE = 62, + CPUHP_TRACE_RB_PREPARE = 63, + CPUHP_MM_ZS_PREPARE = 64, + CPUHP_MM_ZSWP_MEM_PREPARE = 65, + CPUHP_MM_ZSWP_POOL_PREPARE = 66, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 67, + CPUHP_ZCOMP_PREPARE = 68, + CPUHP_TIMERS_PREPARE = 69, + CPUHP_MIPS_SOC_PREPARE = 70, + CPUHP_BP_PREPARE_DYN = 71, + CPUHP_BP_PREPARE_DYN_END = 91, + CPUHP_BP_KICK_AP = 92, + CPUHP_BRINGUP_CPU = 93, + CPUHP_AP_IDLE_DEAD = 94, + CPUHP_AP_OFFLINE = 95, + CPUHP_AP_CACHECTRL_STARTING = 96, + CPUHP_AP_SCHED_STARTING = 97, + CPUHP_AP_RCUTREE_DYING = 98, + CPUHP_AP_CPU_PM_STARTING = 99, + CPUHP_AP_IRQ_GIC_STARTING = 100, + CPUHP_AP_IRQ_HIP04_STARTING = 101, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 102, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 103, + CPUHP_AP_IRQ_BCM2836_STARTING = 104, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 105, + CPUHP_AP_IRQ_RISCV_STARTING = 106, + CPUHP_AP_IRQ_LOONGARCH_STARTING = 107, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 108, + CPUHP_AP_ARM_MVEBU_COHERENCY = 109, + CPUHP_AP_MICROCODE_LOADER = 110, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 111, + CPUHP_AP_PERF_X86_STARTING = 112, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 113, + CPUHP_AP_PERF_X86_CQM_STARTING = 114, + CPUHP_AP_PERF_X86_CSTATE_STARTING = 115, + CPUHP_AP_PERF_XTENSA_STARTING = 116, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 117, + CPUHP_AP_ARM_VFP_STARTING = 118, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 119, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 120, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 121, + CPUHP_AP_PERF_ARM_STARTING = 122, + CPUHP_AP_PERF_RISCV_STARTING = 123, + CPUHP_AP_ARM_L2X0_STARTING = 124, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 125, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 126, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 127, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 128, + CPUHP_AP_JCORE_TIMER_STARTING = 129, + CPUHP_AP_ARM_TWD_STARTING = 130, + CPUHP_AP_QCOM_TIMER_STARTING = 131, + CPUHP_AP_TEGRA_TIMER_STARTING = 132, + CPUHP_AP_ARMADA_TIMER_STARTING = 133, + CPUHP_AP_MARCO_TIMER_STARTING = 134, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 135, + CPUHP_AP_ARC_TIMER_STARTING = 136, + CPUHP_AP_RISCV_TIMER_STARTING = 137, + CPUHP_AP_CLINT_TIMER_STARTING = 138, + CPUHP_AP_CSKY_TIMER_STARTING = 139, + CPUHP_AP_TI_GP_TIMER_STARTING = 140, + CPUHP_AP_HYPERV_TIMER_STARTING = 141, + CPUHP_AP_DUMMY_TIMER_STARTING = 142, + CPUHP_AP_ARM_XEN_STARTING = 143, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 144, + CPUHP_AP_ARM_CORESIGHT_STARTING = 145, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 146, + CPUHP_AP_ARM64_ISNDEP_STARTING = 147, + CPUHP_AP_SMPCFD_DYING = 148, + CPUHP_AP_HRTIMERS_DYING = 149, + CPUHP_AP_X86_TBOOT_DYING = 150, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 151, + CPUHP_AP_ONLINE = 152, + CPUHP_TEARDOWN_CPU = 153, + CPUHP_AP_ONLINE_IDLE = 154, + CPUHP_AP_HYPERV_ONLINE = 155, + CPUHP_AP_KVM_ONLINE = 156, + CPUHP_AP_SCHED_WAIT_EMPTY = 157, + CPUHP_AP_SMPBOOT_THREADS = 158, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 159, + CPUHP_AP_BLK_MQ_ONLINE 
= 160, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 161, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 162, + CPUHP_AP_PERF_ONLINE = 163, + CPUHP_AP_PERF_X86_ONLINE = 164, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 165, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 166, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 167, + CPUHP_AP_PERF_X86_RAPL_ONLINE = 168, + CPUHP_AP_PERF_X86_CQM_ONLINE = 169, + CPUHP_AP_PERF_X86_CSTATE_ONLINE = 170, + CPUHP_AP_PERF_X86_IDXD_ONLINE = 171, + CPUHP_AP_PERF_S390_CF_ONLINE = 172, + CPUHP_AP_PERF_S390_SF_ONLINE = 173, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 174, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 175, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 176, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 177, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 178, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 179, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 180, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 181, + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 182, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 183, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 184, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 185, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 186, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 187, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 188, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 189, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 190, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 191, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 192, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 193, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 194, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 195, + CPUHP_AP_PERF_CSKY_ONLINE = 196, + CPUHP_AP_WATCHDOG_ONLINE = 197, + CPUHP_AP_WORKQUEUE_ONLINE = 198, + CPUHP_AP_RANDOM_ONLINE = 199, + CPUHP_AP_RCUTREE_ONLINE = 200, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 201, + CPUHP_AP_ONLINE_DYN = 202, + CPUHP_AP_ONLINE_DYN_END = 232, + CPUHP_AP_MM_DEMOTION_ONLINE = 233, + CPUHP_AP_X86_HPET_ONLINE = 234, + CPUHP_AP_X86_KVM_CLK_ONLINE = 235, + CPUHP_AP_ACTIVE = 236, + CPUHP_ONLINE = 237, +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, + EVENT_FILE_FL_FREED = 2048, +}; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + int ret; + char __data[0]; +}; + +typedef __u32 __le32; + +typedef unsigned long uintptr_t; + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +struct eventfs_inode; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter __attribute__((btf_type_tag("rcu"))) * filter; + struct eventfs_inode *ei; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; + unsigned long flags; + atomic_t ref; + 
atomic_t sm_ref; + atomic_t tm_ref; +}; + +struct prog_entry; + +struct event_filter { + struct prog_entry __attribute__((btf_type_tag("rcu"))) * prog; + char *filter_string; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; +}; + +struct trace_buffer; + +struct ring_buffer_event; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +struct ring_buffer_event { + u32 type_len : 5; + u32 time_delta : 27; + u32 array[0]; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +struct async_domain { + struct list_head pending; + unsigned int registered : 1; +}; + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, + unsigned long, int, void *); + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +enum iommu_page_response_code { + IOMMU_PAGE_RESP_SUCCESS = 0, + IOMMU_PAGE_RESP_INVALID = 1, + IOMMU_PAGE_RESP_FAILURE = 2, +}; + +struct iommu_dirty_ops; + +struct iommu_dma_cookie; + +struct iommu_fault; + +struct iommu_domain { + unsigned int type; + const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + unsigned long pgsize_bitmap; + struct iommu_domain_geometry geometry; + struct iommu_dma_cookie *iova_cookie; + enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *, void *); + void *fault_data; + union { + struct { + iommu_fault_handler_t handler; + void *handler_token; + }; + struct { + struct mm_struct *mm; + int users; + }; + }; +}; + +struct iommu_iotlb_gather; + +struct iommu_domain_ops { + int (*attach_dev)(struct iommu_domain *, struct device *); + int (*set_dev_pasid)(struct iommu_domain *, struct device *, ioasid_t); + int (*map_pages)(struct iommu_domain *, unsigned long, phys_addr_t, size_t, + size_t, int, gfp_t, size_t *); + size_t (*unmap_pages)(struct iommu_domain *, unsigned long, size_t, size_t, + struct iommu_iotlb_gather *); + void (*flush_iotlb_all)(struct iommu_domain *); + int (*iotlb_sync_map)(struct iommu_domain *, unsigned long, size_t); + void (*iotlb_sync)(struct iommu_domain *, struct iommu_iotlb_gather *); + phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); + bool (*enforce_cache_coherency)(struct iommu_domain *); + int (*enable_nesting)(struct iommu_domain *); + int (*set_pgtable_quirks)(struct iommu_domain *, unsigned long); + void (*free)(struct iommu_domain *); +}; + +struct iommu_iotlb_gather { + unsigned long start; + unsigned long end; + size_t pgsize; + struct list_head freelist; + bool queued; +}; + +struct iommu_dirty_bitmap; + +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *, bool); + int (*read_and_clear_dirty)(struct iommu_domain *, unsigned long, size_t, + unsigned long, struct iommu_dirty_bitmap *); +}; + +struct iova_bitmap; + +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +struct iommu_fault_unrecoverable { + __u32 reason; + __u32 flags; + __u32 pasid; + __u32 perm; + __u64 addr; + __u64 fetch_addr; +}; + +struct iommu_fault_page_request { + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + __u64 private_data[2]; +}; + +struct iommu_fault { + __u32 type; + __u32 
padding; + union { + struct iommu_fault_unrecoverable event; + struct iommu_fault_page_request prm; + __u8 padding2[56]; + }; +}; + +struct iommu_user_data { + unsigned int type; + void __attribute__((btf_type_tag("user"))) * uptr; + size_t len; +}; + +struct iommu_device { + struct list_head list; + const struct iommu_ops *ops; + struct fwnode_handle *fwnode; + struct device *dev; + struct iommu_group *singleton_group; + u32 max_pasids; +}; + +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +struct iommu_fault_event { + struct iommu_fault fault; + struct list_head list; +}; + +struct iommu_page_response { + __u32 argsz; + __u32 version; + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 code; +}; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, + DOMAIN_BUS_PCI_DEVICE_MSI = 11, + DOMAIN_BUS_PCI_DEVICE_MSIX = 12, + DOMAIN_BUS_DMAR = 13, + DOMAIN_BUS_AMDVI = 14, + DOMAIN_BUS_PCI_DEVICE_IMS = 15, +}; + +typedef unsigned long irq_hw_number_t; + +struct irq_domain_ops; + +struct irq_domain_chip_generic; + +struct msi_parent_ops; + +struct irq_data; + +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct mutex mutex; + struct irq_domain *root; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct device *dev; + struct device *pm_dev; + struct irq_domain *parent; + const struct msi_parent_ops *msi_parent_ops; + irq_hw_number_t hwirq_max; + unsigned int revmap_size; + struct xarray revmap_tree; + struct irq_data __attribute__((btf_type_tag("rcu"))) * revmap[0]; +}; + +struct irq_fwspec; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, + unsigned long *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain *, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, unsigned long *, + unsigned int *); +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_common_data; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + unsigned long hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + unsigned int node; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + cpumask_var_t effective_affinity; +}; + +struct pci_msi_desc { + union { + u32 msi_mask; + u32 msix_ctrl; + }; + struct { + u8 is_msix : 1; + u8 multiple : 3; + u8 multi_cap : 3; + u8 
can_mask : 1; + u8 is_64 : 1; + u8 is_virtual : 1; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; +}; + +union msi_domain_cookie { + u64 value; + void *ptr; + void *iobase; +}; + +union msi_instance_cookie { + u64 value; + void *ptr; +}; + +struct msi_desc_data { + union msi_domain_cookie dcookie; + union msi_instance_cookie icookie; +}; + +struct x86_msi_addr_lo { + union { + struct { + u32 reserved_0 : 2; + u32 dest_mode_logical : 1; + u32 redirect_hint : 1; + u32 reserved_1 : 1; + u32 virt_destid_8_14 : 7; + u32 destid_0_7 : 8; + u32 base_address : 12; + }; + struct { + u32 dmar_reserved_0 : 2; + u32 dmar_index_15 : 1; + u32 dmar_subhandle_valid : 1; + u32 dmar_format : 1; + u32 dmar_index_0_14 : 15; + u32 dmar_base_address : 12; + }; + }; +}; + +typedef struct x86_msi_addr_lo arch_msi_msg_addr_lo_t; + +struct x86_msi_addr_hi { + u32 reserved : 8; + u32 destid_8_31 : 24; +}; + +typedef struct x86_msi_addr_hi arch_msi_msg_addr_hi_t; + +struct x86_msi_data { + union { + struct { + u32 vector : 8; + u32 delivery_mode : 3; + u32 dest_mode_logical : 1; + u32 reserved : 2; + u32 active_low : 1; + u32 is_level : 1; + }; + u32 dmar_subhandle; + }; +}; + +typedef struct x86_msi_data arch_msi_msg_data_t; + +struct msi_msg { + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + union { + u32 data; + arch_msi_msg_data_t arch_data; + }; +}; + +struct irq_affinity_desc; + +struct device_attribute; + +struct msi_desc { + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct irq_affinity_desc *affinity; + const void *iommu_cookie; + struct device_attribute *sysfs_attrs; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + u16 msi_index; + union { + struct pci_msi_desc pci; + struct msi_desc_data data; + }; +}; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed : 1; +}; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void 
(*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + unsigned long flags; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_chip_generic; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + struct irq_chip_generic *gc[0]; +}; + +struct irq_chip_regs { + unsigned long enable; + unsigned long disable; + unsigned long mask; + unsigned long ack; + unsigned long eoi; + unsigned long type; + unsigned long polarity; +}; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +struct irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 type_cache; + u32 polarity_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + unsigned long installed; + unsigned long unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +struct irqaction; + +struct irq_affinity_notify; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + unsigned int __attribute__((btf_type_tag("percpu"))) * kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + unsigned long last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + cpumask_var_t pending_mask; + unsigned long threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; + struct proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + struct hlist_node resend_node; + long : 64; + long : 64; + long : 64; +}; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void __attribute__((btf_type_tag("percpu"))) * percpu_dev_id; + struct irqaction *next; + 
irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + unsigned long thread_flags; + unsigned long thread_mask; + const char *name; + struct proc_dir_entry *dir; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +struct msi_domain_info; + +struct msi_parent_ops { + u32 supported_flags; + const char *prefix; + bool (*init_dev_msi_info)(struct device *, struct irq_domain *, + struct irq_domain *, struct msi_domain_info *); +}; + +struct msi_domain_ops; + +struct msi_domain_info { + u32 flags; + enum irq_domain_bus_token bus_token; + unsigned int hwsize; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +struct irq_alloc_info; + +typedef struct irq_alloc_info msi_alloc_info_t; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, + irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); + void (*msi_post_free)(struct irq_domain *, struct device *); +}; + +enum irq_alloc_type { + X86_IRQ_ALLOC_TYPE_IOAPIC = 1, + X86_IRQ_ALLOC_TYPE_HPET = 2, + X86_IRQ_ALLOC_TYPE_PCI_MSI = 3, + X86_IRQ_ALLOC_TYPE_PCI_MSIX = 4, + X86_IRQ_ALLOC_TYPE_DMAR = 5, + X86_IRQ_ALLOC_TYPE_AMDVI = 6, + X86_IRQ_ALLOC_TYPE_UV = 7, +}; + +struct ioapic_alloc_info { + int pin; + int node; + u32 is_level : 1; + u32 active_low : 1; + u32 valid : 1; +}; + +struct uv_alloc_info { + int limit; + int blade; + unsigned long offset; + char *name; +}; + +struct irq_alloc_info { + enum irq_alloc_type type; + u32 flags; + u32 devid; + irq_hw_number_t hwirq; + const struct cpumask *mask; + struct msi_desc *desc; + void *data; + union { + struct ioapic_alloc_info ioapic; + struct uv_alloc_info uv; + }; +}; + +struct msi_dev_domain { + struct xarray store; + struct irq_domain *domain; +}; + +struct platform_msi_priv_data; + +struct msi_device_data { + unsigned long properties; + struct platform_msi_priv_data *platform_data; + struct mutex mutex; + struct msi_dev_domain __domains[2]; + unsigned long __iter_idx; +}; + +struct iopf_device_param; + +struct iommu_fault_param; + +struct iommu_fwspec; + +struct dev_iommu { + struct mutex lock; + struct iommu_fault_param *fault_param; + struct iopf_device_param *iopf_param; + struct iommu_fwspec *fwspec; + struct iommu_device *iommu_dev; + void *priv; + u32 max_pasids; + u32 attach_deferred : 1; + u32 pci_32bit_workaround : 1; + u32 require_direct : 1; + u32 shadow_on_flush : 1; +}; + +typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); + +struct iommu_fault_param { + iommu_dev_fault_handler_t handler; + void *data; + struct list_head faults; + struct mutex lock; +}; + +struct iommu_fwspec { + const struct iommu_ops *ops; + 
struct fwnode_handle *iommu_fwnode; + u32 flags; + unsigned int num_ids; + u32 ids[0]; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct bpf_insn { + __u8 code; + __u8 dst_reg : 4; + __u8 src_reg : 4; + __s16 off; + __s32 imm; +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + __MAX_BPF_ATTACH_TYPE = 56, +}; + +struct bpf_prog_stats; + +struct bpf_prog_aux; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited : 1; + u16 jit_requested : 1; + u16 gpl_compatible : 1; + u16 cb_access : 1; + u16 dst_needed : 1; + u16 blinding_requested : 1; + u16 blinded : 1; + u16 is_func : 1; + u16 kprobe_override : 1; + u16 has_callchain_buf : 1; + u16 enforce_expected_attach_type : 1; + u16 call_get_stack : 1; + u16 call_get_func_ip : 1; + u16 tstamp_type_access : 1; + enum bpf_prog_type type; + 
enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats __attribute__((btf_type_tag("percpu"))) * stats; + int __attribute__((btf_type_tag("percpu"))) * active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + union { + struct { + struct { + } __empty_insns; + struct sock_filter insns[0]; + }; + struct { + struct { + } __empty_insnsi; + struct bpf_insn insnsi[0]; + }; + }; +}; + +typedef struct { + local64_t v; +} u64_stats_t; + +struct bpf_prog_stats { + u64_stats_t cnt; + u64_stats_t nsecs; + u64_stats_t misses; + struct u64_stats_sync syncp; + long : 64; +}; + +struct bpf_ksym { + unsigned long start; + unsigned long end; + char name[512]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct btf; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct btf_type; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_kfunc_btf_tab; + +struct bpf_prog_ops; + +struct bpf_map; + +struct btf_mod_pair; + +struct bpf_prog_offload; + +struct bpf_func_info; + +struct bpf_func_info_aux; + +struct bpf_line_info; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 real_func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool dev_bound; + bool offload_requested; + bool attach_btf_trace; + bool func_proto_unreliable; + bool sleepable; + bool tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + u32 verified_insns; + int cgroup_atype; + struct bpf_map *cgroup_storage[2]; + char name[16]; + u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + struct module *mod; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_KEY = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCK_COMMON = 12, + PTR_TO_TCP_SOCK = 13, + PTR_TO_TP_BUFFER = 14, + PTR_TO_XDP_SOCK = 15, + PTR_TO_BTF_ID = 16, + PTR_TO_MEM = 17, + PTR_TO_BUF = 18, + PTR_TO_FUNC = 19, + 
CONST_PTR_TO_DYNPTR = 20, + __BPF_REG_TYPE_MAX = 21, + PTR_TO_MAP_VALUE_OR_NULL = 260, + PTR_TO_SOCKET_OR_NULL = 267, + PTR_TO_SOCK_COMMON_OR_NULL = 268, + PTR_TO_TCP_SOCK_OR_NULL = 269, + PTR_TO_BTF_ID_OR_NULL = 272, + __BPF_REG_TYPE_LIMIT = 33554431, +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + u32 btf_id; +}; + +struct btf_func_model { + u8 ret_size; + u8 ret_flags; + u8 nr_args; + u8 arg_size[12]; + u8 arg_flags[12]; +}; + +struct bpf_tramp_image; + +struct bpf_trampoline { + struct hlist_node hlist; + struct ftrace_ops *fops; + struct mutex mutex; + refcount_t refcnt; + u32 flags; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; + struct module *mod; +}; + +struct bpf_tramp_image { + void *image; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, +}; + +struct bpf_map_ops; + +struct btf_record; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u64 map_extra; + u32 map_flags; + u32 id; + struct btf_record *record; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; + struct btf *btf; + struct obj_cgroup *objcg; + char name[16]; + long : 64; + long : 64; + long : 64; + atomic64_t refcnt; + atomic64_t usercnt; + union { + struct work_struct work; + struct callback_head rcu; + }; + struct mutex freeze_mutex; + atomic64_t writecnt; + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; + bool bypass_spec_v1; + bool frozen; + bool free_after_mult_rcu_gp; + s64 __attribute__((btf_type_tag("percpu"))) * elem_count; + long : 64; + 
long : 64; +}; + +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); + +union bpf_attr; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_iter_seq_info; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map *(*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void *(*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, + union bpf_attr __attribute__((btf_type_tag("user"))) *); + int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, + union bpf_attr + __attribute__((btf_type_tag("user"))) *); + int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, + union bpf_attr __attribute__((btf_type_tag("user"))) *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, + union bpf_attr __attribute__((btf_type_tag("user"))) *); + void *(*map_lookup_elem)(struct bpf_map *, void *); + long (*map_update_elem)(struct bpf_map *, void *, void *, u64); + long (*map_delete_elem)(struct bpf_map *, void *); + long (*map_push_elem)(struct bpf_map *, void *, u64); + long (*map_pop_elem)(struct bpf_map *, void *); + long (*map_peek_elem)(struct bpf_map *, void *); + void *(*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + void *(*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, + const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) * + *(*map_owner_storage_ptr)(void *); + long (*map_redirect)(struct bpf_map *, u64, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, + struct bpf_func_state *, + struct bpf_func_state *); + long (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); + u64 (*map_mem_usage)(const struct bpf_map *); + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + __u64 map_extra; + }; + struct { + 
__u32 map_fd; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; + __u64 fd_array; + __u64 core_relos; + __u32 core_relo_rec_size; + __u32 log_true_size; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + __s32 path_fd; + }; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + union { + __u32 prog_cnt; + __u32 count; + }; + __u64 prog_attach_flags; + __u64 link_ids; + __u64 link_attach_flags; + __u64 revision; + } query; + struct { + __u64 name; + __u32 prog_fd; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + __u32 btf_log_true_size; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + union { + __u32 prog_fd; + __u32 map_fd; + }; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + }; + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __u64 syms; + __u64 addrs; + __u64 cookies; + } kprobe_multi; + struct { + __u32 target_btf_id; + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } tcx; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; + }; + } link_create; + struct { + __u32 link_fd; + union { + __u32 new_prog_fd; + __u32 new_map_fd; + }; + __u32 flags; + union { + __u32 old_prog_fd; + __u32 old_map_fd; + }; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 
type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf_kfunc_set_tab; + +struct btf_id_dtor_kfunc_tab; + +struct btf_struct_metas; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; + struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; + struct btf_struct_metas *struct_meta_tab; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[56]; + bool kernel_btf; +}; + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +enum bpf_cgroup_iter_order { + BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY = 1, + BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, + BPF_CGROUP_ITER_DESCENDANTS_POST = 3, + BPF_CGROUP_ITER_ANCESTORS_UP = 4, +}; + +enum bpf_iter_task_type { + BPF_TASK_ITER_ALL = 0, + BPF_TASK_ITER_TID = 1, + BPF_TASK_ITER_TGID = 2, +}; + +struct bpf_iter_aux_info { + struct bpf_map *map; + struct { + struct cgroup *start; + enum bpf_cgroup_iter_order order; + } cgroup; + struct { + enum bpf_iter_task_type type; + u32 pid; + } task; +}; + +enum btf_field_type { + BPF_SPIN_LOCK = 1, + BPF_TIMER = 2, + BPF_KPTR_UNREF = 4, + BPF_KPTR_REF = 8, + BPF_KPTR_PERCPU = 16, + BPF_KPTR = 28, + BPF_LIST_HEAD = 32, + BPF_LIST_NODE = 64, + BPF_RB_ROOT = 128, + BPF_RB_NODE = 256, + BPF_GRAPH_NODE_OR_ROOT = 480, + BPF_REFCOUNT = 512, +}; + +typedef void (*btf_dtor_kfunc_t)(void *); + +struct btf_field_kptr { + struct btf *btf; + struct module *module; + btf_dtor_kfunc_t dtor; + u32 btf_id; +}; + +struct btf_field_graph_root { + struct btf *btf; + u32 value_btf_id; + u32 node_offset; + struct btf_record *value_rec; +}; + +struct btf_field { + u32 offset; + u32 size; + enum btf_field_type type; + union { + struct btf_field_kptr kptr; + struct btf_field_graph_root graph_root; + }; +}; + +struct btf_record { + u32 cnt; + u32 field_mask; + int spin_lock_off; + int timer_off; + int refcount_off; + struct btf_field fields[0]; +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, + union bpf_attr __attribute__((btf_type_tag("user"))) *); +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct io_uring_sqe; + +struct io_uring_cmd { + struct file *file; + const struct io_uring_sqe *sqe; + union { + void 
(*task_work_cb)(struct io_uring_cmd *, unsigned int); + void *cookie; + }; + u32 cmd_op; + u32 flags; + u8 pdu[32]; +}; + +typedef int __kernel_rwf_t; + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + struct { + __u32 cmd_op; + __u32 __pad1; + }; + }; + union { + __u64 addr; + __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; + __u32 xattr_flags; + __u32 msg_ring_flags; + __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; + }; + __u64 user_data; + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; + __u32 optlen; + struct { + __u16 addr_len; + __u16 __pad3[1]; + }; + }; + union { + struct { + __u64 addr3; + __u64 __pad2[1]; + }; + __u64 optval; + __u8 cmd[0]; + }; +}; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +struct fdtable { + unsigned int max_fds; + struct file __attribute__((btf_type_tag("rcu"))) * *fd; + unsigned long *close_on_exec; + unsigned long *open_fds; + unsigned long *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable __attribute__((btf_type_tag("rcu"))) * fdt; + struct fdtable fdtab; + long : 64; + long : 64; + long : 64; + long : 64; + spinlock_t file_lock; + unsigned int next_fd; + unsigned long close_on_exec_init[1]; + unsigned long open_fds_init[1]; + unsigned long full_fds_bits_init[1]; + struct file __attribute__((btf_type_tag("rcu"))) * fd_array[64]; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +struct tty_driver; + +struct tty_port; + +struct tty_operations; + +struct tty_ldisc; + +struct tty_struct { + struct kref kref; + int index; + struct device *dev; + struct tty_driver *driver; + struct tty_port *port; + const struct tty_operations *ops; + struct tty_ldisc *ldisc; + struct ld_semaphore ldisc_sem; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + struct ktermios termios; + struct ktermios termios_locked; + char name[64]; + unsigned long flags; + int count; + unsigned int receive_room; + struct winsize winsize; + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + unsigned long unused[0]; + } flow; + struct { + struct pid *pgrp; + struct pid *session; + spinlock_t lock; + unsigned 
char pktstatus; + bool packet; + unsigned long unused[0]; + } ctrl; + bool hw_stopped; + bool closing; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + spinlock_t files_lock; + int write_cnt; + unsigned char *write_buf; + struct list_head tty_files; + struct work_struct SAK_work; +}; + +struct tty_driver { + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short type; + short subtype; + struct ktermios init_termios; + unsigned long flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct list_head tty_drivers; +}; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + unsigned int used; + unsigned int size; + unsigned int commit; + unsigned int lookahead; + unsigned int read; + bool flags; + long : 0; + u8 data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + unsigned long flags; + unsigned long iflags; + unsigned char console : 1; + struct mutex mutex; + struct mutex buf_mutex; + unsigned char *xmit_buf; + struct { + union { + struct __kfifo kfifo; + unsigned char *type; + const unsigned char *const_type; + char (*rectype)[0]; + unsigned char *ptr; + const unsigned char *ptr_const; + }; + unsigned char buf[0]; + } xmit_fifo; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct tty_port_operations { + bool (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, bool); + void (*shutdown)(struct tty_port *); + int (*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct *(*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + ssize_t (*write)(struct tty_struct *, const u8 *, size_t); + int (*put_char)(struct tty_struct *, u8); + 
void (*flush_chars)(struct tty_struct *); + unsigned int (*write_room)(struct tty_struct *); + unsigned int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, unsigned long); + long (*compat_ioctl)(struct tty_struct *, unsigned int, unsigned long); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct tty_struct *); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); + void (*send_xchar)(struct tty_struct *, char); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*proc_show)(struct seq_file *, void *); +}; + +struct tty_ldisc_ops; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_ldisc_ops { + char *name; + int num; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, void **, + unsigned long); + ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); + int (*ioctl)(struct tty_struct *, unsigned int, unsigned long); + int (*compat_ioctl)(struct tty_struct *, unsigned int, unsigned long); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + void (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, bool); + size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + struct module *owner; +}; + +struct bpf_run_ctx {}; + +typedef unsigned int slab_flags_t; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct kmem_cache_order_objects { + unsigned int x; +}; + +struct kmem_cache_cpu; + +struct kmem_cache_node; + +struct kmem_cache { + struct kmem_cache_cpu __attribute__((btf_type_tag("percpu"))) * cpu_slab; + slab_flags_t flags; + unsigned long min_partial; + unsigned int size; + unsigned int object_size; + struct reciprocal_value reciprocal_size; + unsigned int offset; + unsigned int cpu_partial; + unsigned int cpu_partial_slabs; + struct kmem_cache_order_objects oo; + struct kmem_cache_order_objects min; + gfp_t allocflags; + int refcount; + void (*ctor)(void *); + unsigned int inuse; + unsigned int align; + unsigned int red_left_pad; + const char *name; + struct list_head list; + struct kobject kobj; + unsigned long random; + unsigned int remote_node_defrag_ratio; + unsigned int *random_seq; + unsigned int useroffset; + unsigned int usersize; + struct kmem_cache_node *node[64]; +}; + +typedef unsigned __int128 __u128; + +typedef __u128 u128; + +typedef u128 freelist_full_t; + +typedef union { + 
struct { + void *freelist; + unsigned long counter; + }; + freelist_full_t full; +} freelist_aba_t; + +typedef struct { +} local_lock_t; + +struct slab; + +struct kmem_cache_cpu { + union { + struct { + void **freelist; + unsigned long tid; + }; + freelist_aba_t freelist_tid; + }; + struct slab *slab; + struct slab *partial; + local_lock_t lock; +}; + +struct slab { + unsigned long __page_flags; + struct kmem_cache *slab_cache; + union { + struct { + union { + struct list_head slab_list; + struct { + struct slab *next; + int slabs; + }; + }; + union { + struct { + void *freelist; + union { + unsigned long counters; + struct { + unsigned int inuse : 16; + unsigned int objects : 15; + unsigned int frozen : 1; + }; + }; + }; + freelist_aba_t freelist_counter; + }; + }; + struct callback_head callback_head; + }; + unsigned int __unused; + atomic_t __page_refcount; + unsigned long memcg_data; +}; + +struct io_tlb_area; + +struct io_tlb_slot; + +struct io_tlb_pool { + phys_addr_t start; + phys_addr_t end; + void *vaddr; + unsigned long nslabs; + bool late_alloc; + unsigned int nareas; + unsigned int area_nslabs; + struct io_tlb_area *areas; + struct io_tlb_slot *slots; +}; + +struct io_tlb_mem { + struct io_tlb_pool defpool; + unsigned long nslabs; + struct dentry *debugfs; + bool force_bounce; + bool for_alloc; + atomic_long_t total_used; + atomic_long_t used_hiwater; +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +struct compact_control { + struct list_head freepages; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + unsigned long free_pfn; + unsigned long migrate_pfn; + unsigned long fast_start_pfn; + struct zone *zone; + unsigned long total_migrate_scanned; + unsigned long total_free_scanned; + unsigned short fast_search_fail; + short search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool finish_pageblock; + bool alloc_contig; +}; + +struct kmem_cache_node { + spinlock_t list_lock; + unsigned long nr_partial; + struct list_head partial; + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +}; + +typedef __u32 Elf32_Word; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +struct posix_acl_entry { + short e_tag; + unsigned short e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + struct callback_head a_rcu; + unsigned int a_count; + struct posix_acl_entry a_entries[0]; +}; + +typedef __u64 __addrpair; + +typedef __u32 __portpair; + +typedef struct { + struct net __attribute__((btf_type_tag("rcu"))) * net; +} possible_net_t; + +struct proto; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + unsigned short skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse : 4; + unsigned char skc_reuseport : 1; + unsigned char skc_ipv6only : 1; + unsigned char skc_net_refcnt : 1; + int skc_bound_dev_if; + union { + struct hlist_node 
skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + unsigned long skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + unsigned short skc_tx_queue_mapping; + unsigned short skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +typedef u64 netdev_features_t; + +struct sock_cgroup_data { + struct cgroup *cgroup; + u32 classid; + u16 prioidx; +}; + +typedef struct { +} netns_tracker; + +struct sk_filter; + +struct socket_wq; + +struct socket; + +struct sock_reuseport; + +struct sock { + struct sock_common __sk_common; + struct dst_entry __attribute__((btf_type_tag("rcu"))) * sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + socket_lock_t sk_lock; + atomic_t sk_drops; + int sk_rcvlowat; + struct sk_buff_head sk_error_queue; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + int sk_forward_alloc; + u32 sk_reserved_mem; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + int sk_rcvbuf; + int sk_disconnects; + struct sk_filter __attribute__((btf_type_tag("rcu"))) * sk_filter; + union { + struct socket_wq __attribute__((btf_type_tag("rcu"))) * sk_wq; + struct socket_wq *sk_wq_raw; + }; + struct dst_entry __attribute__((btf_type_tag("rcu"))) * sk_dst_cache; + atomic_t sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + unsigned long sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff_head sk_write_queue; + __s32 sk_peek_off; + int sk_write_pending; + __u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + long sk_sndtimeo; + struct timer_list sk_timer; + __u32 sk_priority; + __u32 sk_mark; + unsigned long sk_pacing_rate; + unsigned long sk_max_pacing_rate; + struct page_frag sk_frag; + netdev_features_t sk_route_caps; + int sk_gso_type; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + __u32 sk_txhash; + u8 sk_gso_disabled : 1; + u8 sk_kern_sock : 1; + u8 sk_no_check_tx : 1; + u8 sk_no_check_rx : 1; + u8 sk_userlocks : 4; + u8 sk_pacing_shift; + u16 sk_type; + u16 sk_protocol; + u16 sk_gso_max_segs; + unsigned long sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err; + int sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + u8 sk_txrehash; + u8 sk_prefer_busy_poll; + u16 sk_busy_poll_budget; + spinlock_t sk_peer_lock; + int sk_bind_phc; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long sk_rcvtimeo; + ktime_t sk_stamp; + atomic_t sk_tskey; + atomic_t sk_zckey; + u32 sk_tsflags; + u8 sk_shutdown; + u8 sk_clockid; + u8 sk_txtime_deadline_mode : 1; + u8 sk_txtime_report_errors : 1; + u8 sk_txtime_unused : 6; + bool sk_use_task_frag; + struct socket *sk_socket; + void *sk_user_data; + struct sock_cgroup_data sk_cgrp_data; + struct mem_cgroup *sk_memcg; + void (*sk_state_change)(struct sock *); + void 
(*sk_data_ready)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport __attribute__((btf_type_tag("rcu"))) * sk_reuseport_cb; + struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) * sk_bpf_storage; + struct callback_head sk_rcu; + netns_tracker ns_tracker; + struct hlist_node sk_bind2_node; +}; + +struct smc_hashinfo; + +typedef struct { + union { + void *kernel; + void __attribute__((btf_type_tag("user"))) * user; + }; + bool is_kernel : 1; +} sockptr_t; + +struct sockaddr; + +struct msghdr; + +struct sk_psock; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock *(*accept)(struct sock *, int, int *, bool); + int (*ioctl)(struct sock *, int, int *); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, + char __attribute__((btf_type_tag("user"))) *, + int __attribute__((btf_type_tag("user"))) *); + void (*keepalive)(struct sock *, int); + int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); + void (*splice_eof)(struct socket *); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + bool (*bpf_bypass_getsockopt)(int, int); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, unsigned short); + void (*put_port)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + unsigned int inuse_idx; + int (*forward_alloc_get)(const struct sock *); + bool (*stream_memory_free)(const struct sock *, int); + bool (*sock_is_readable)(struct sock *); + void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + int __attribute__((btf_type_tag("percpu"))) * per_cpu_fw_alloc; + struct percpu_counter *sockets_allocated; + unsigned long *memory_pressure; + long *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + unsigned int ipv6_pinfo_offset; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + unsigned int __attribute__((btf_type_tag("percpu"))) * orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; + } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +typedef unsigned short __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + union { + char sa_data_min[14]; + struct { + struct { + } 
__empty_sa_data; + char sa_data[0]; + }; + }; +}; + +struct ubuf_info; + +struct msghdr { + void *msg_name; + int msg_namelen; + int msg_inq; + struct iov_iter msg_iter; + union { + void *msg_control; + void __attribute__((btf_type_tag("user"))) * msg_control_user; + }; + bool msg_control_is_user : 1; + bool msg_get_inq : 1; + unsigned int msg_flags; + __kernel_size_t msg_controllen; + struct kiocb *msg_iocb; + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sock *, struct sk_buff *, struct iov_iter *, size_t); +}; + +struct ubuf_info { + void (*callback)(struct sk_buff *, struct ubuf_info *, bool); + refcount_t refcnt; + u8 flags; +}; + +typedef u32 xdp_features_t; + +struct net_device_stats { + union { + unsigned long rx_packets; + atomic_long_t __rx_packets; + }; + union { + unsigned long tx_packets; + atomic_long_t __tx_packets; + }; + union { + unsigned long rx_bytes; + atomic_long_t __rx_bytes; + }; + union { + unsigned long tx_bytes; + atomic_long_t __tx_bytes; + }; + union { + unsigned long rx_errors; + atomic_long_t __rx_errors; + }; + union { + unsigned long tx_errors; + atomic_long_t __tx_errors; + }; + union { + unsigned long rx_dropped; + atomic_long_t __rx_dropped; + }; + union { + unsigned long tx_dropped; + atomic_long_t __tx_dropped; + }; + union { + unsigned long multicast; + atomic_long_t __multicast; + }; + union { + unsigned long collisions; + atomic_long_t __collisions; + }; + union { + unsigned long rx_length_errors; + atomic_long_t __rx_length_errors; + }; + union { + unsigned long rx_over_errors; + atomic_long_t __rx_over_errors; + }; + union { + unsigned long rx_crc_errors; + atomic_long_t __rx_crc_errors; + }; + union { + unsigned long rx_frame_errors; + atomic_long_t __rx_frame_errors; + }; + union { + unsigned long rx_fifo_errors; + atomic_long_t __rx_fifo_errors; + }; + union { + unsigned long rx_missed_errors; + atomic_long_t __rx_missed_errors; + }; + union { + unsigned long tx_aborted_errors; + atomic_long_t __tx_aborted_errors; + }; + union { + unsigned long tx_carrier_errors; + atomic_long_t __tx_carrier_errors; + }; + union { + unsigned long tx_fifo_errors; + atomic_long_t __tx_fifo_errors; + }; + union { + unsigned long tx_heartbeat_errors; + atomic_long_t __tx_heartbeat_errors; + }; + union { + unsigned long tx_window_errors; + atomic_long_t __tx_window_errors; + }; + union { + unsigned long rx_compressed; + atomic_long_t __rx_compressed; + }; + union { + unsigned long tx_compressed; + atomic_long_t __tx_compressed; + }; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; + struct rb_root tree; +}; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +enum netdev_ml_priv_type { + ML_PRIV_NONE = 0, + ML_PRIV_CAN = 1, +}; + +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE = 0, + NETDEV_PCPU_STAT_LSTATS = 1, + NETDEV_PCPU_STAT_TSTATS = 2, + NETDEV_PCPU_STAT_DSTATS = 3, +}; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +struct sfp_bus; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +struct netdev_name_node; + +struct dev_ifalias; + +struct net_device_ops; + +struct xdp_metadata_ops; + +struct net_device_core_stats; + +struct ethtool_ops; + +struct l3mdev_ops; + +struct ndisc_ops; + +struct header_ops; + +struct in_device; + 
+struct inet6_dev; + +struct wireless_dev; + +struct netdev_rx_queue; + +struct bpf_mprog_entry; + +struct netdev_queue; + +struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct xps_dev_maps; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct pcpu_dstats; + +struct dm_hw_stat_delta; + +struct rtnl_link_ops; + +struct netprio_map; + +struct phy_device; + +struct udp_tunnel_nic_info; + +struct rtnl_hw_stats64; + +struct devlink_port; + +struct net_device { + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias __attribute__((btf_type_tag("rcu"))) * ifalias; + unsigned long mem_end; + unsigned long mem_start; + unsigned long base_addr; + unsigned long state; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + unsigned int flags; + xdp_features_t xdp_features; + unsigned long long priv_flags; + const struct net_device_ops *netdev_ops; + const struct xdp_metadata_ops *xdp_metadata_ops; + int ifindex; + unsigned short gflags; + unsigned short hard_header_len; + unsigned int mtu; + unsigned short needed_headroom; + unsigned short needed_tailroom; + netdev_features_t features; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + netdev_features_t gso_partial_features; + unsigned int min_mtu; + unsigned int max_mtu; + unsigned short type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; + struct net_device_stats stats; + struct net_device_core_stats __attribute__((btf_type_tag("percpu"))) * core_stats; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct ethtool_ops *ethtool_ops; + const struct l3mdev_ops *l3mdev_ops; + const struct ndisc_ops *ndisc_ops; + const struct header_ops *header_ops; + unsigned char operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + unsigned short neigh_priv_len; + unsigned short dev_id; + unsigned short dev_port; + unsigned short padded; + spinlock_t addr_list_lock; + int irq; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset *queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct in_device __attribute__((btf_type_tag("rcu"))) * ip_ptr; + struct inet6_dev __attribute__((btf_type_tag("rcu"))) * ip6_ptr; + struct wireless_dev *ieee80211_ptr; + const unsigned char *dev_addr; + struct netdev_rx_queue *_rx; + unsigned int num_rx_queues; + unsigned int real_num_rx_queues; + struct bpf_prog __attribute__((btf_type_tag("rcu"))) * xdp_prog; + unsigned long gro_flush_timeout; + int napi_defer_hard_irqs; + unsigned int gro_max_size; + unsigned int gro_ipv4_max_size; + unsigned int xdp_zc_max_segs; + rx_handler_func_t __attribute__((btf_type_tag("rcu"))) * rx_handler; + void __attribute__((btf_type_tag("rcu"))) * rx_handler_data; + struct bpf_mprog_entry __attribute__((btf_type_tag("rcu"))) * tcx_ingress; + struct netdev_queue __attribute__((btf_type_tag("rcu"))) * ingress_queue; + struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) * nf_hooks_ingress; + 
unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + struct netdev_queue *_tx; + unsigned int num_tx_queues; + unsigned int real_num_tx_queues; + struct Qdisc __attribute__((btf_type_tag("rcu"))) * qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue __attribute__((btf_type_tag("percpu"))) * xdp_bulkq; + struct xps_dev_maps __attribute__((btf_type_tag("rcu"))) * xps_maps[2]; + struct bpf_mprog_entry __attribute__((btf_type_tag("rcu"))) * tcx_egress; + struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) * nf_hooks_egress; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int __attribute__((btf_type_tag("percpu"))) * pcpu_refcnt; + struct ref_tracker_dir refcnt_tracker; + struct list_head link_watch_list; + enum { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, + } reg_state : 8; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state : 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + possible_net_t nd_net; + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type : 8; + union { + struct pcpu_lstats __attribute__((btf_type_tag("percpu"))) * lstats; + struct pcpu_sw_netstats __attribute__((btf_type_tag("percpu"))) * tstats; + struct pcpu_dstats __attribute__((btf_type_tag("percpu"))) * dstats; + }; + struct dm_hw_stat_delta __attribute__((btf_type_tag("rcu"))) * dm_private; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + unsigned int gso_max_size; + unsigned int tso_max_size; + u16 gso_max_segs; + u16 tso_max_segs; + unsigned int gso_ipv4_max_size; + s16 num_tc; + struct netdev_tc_txq tc_to_txq[16]; + u8 prio_tc_map[16]; + struct netprio_map __attribute__((btf_type_tag("rcu"))) * priomap; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + bool proto_down; + unsigned int wol_enabled : 1; + unsigned int threaded : 1; + struct list_head net_notifier_list; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct bpf_xdp_entity xdp_state[3]; + u8 dev_addr_shadow[32]; + netdevice_tracker linkwatch_dev_tracker; + netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; + struct devlink_port *devlink_port; + long : 64; +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +enum netdev_tx { + __NETDEV_TX_MIN = -2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +enum tc_setup_type { + TC_QUERY_CAPS = 0, + TC_SETUP_QDISC_MQPRIO = 1, + TC_SETUP_CLSU32 = 2, + TC_SETUP_CLSFLOWER = 3, + TC_SETUP_CLSMATCHALL = 4, + TC_SETUP_CLSBPF = 5, + TC_SETUP_BLOCK = 6, + TC_SETUP_QDISC_CBS = 7, + TC_SETUP_QDISC_RED = 8, + TC_SETUP_QDISC_PRIO = 9, + TC_SETUP_QDISC_MQ = 10, + TC_SETUP_QDISC_ETF = 11, + TC_SETUP_ROOT_QDISC = 12, + TC_SETUP_QDISC_GRED = 13, + TC_SETUP_QDISC_TAPRIO = 14, + TC_SETUP_FT = 15, + TC_SETUP_QDISC_ETS = 16, + TC_SETUP_QDISC_TBF = 17, + TC_SETUP_QDISC_FIFO = 18, + TC_SETUP_QDISC_HTB = 19, + TC_SETUP_ACT = 20, +}; + +struct ifreq; + +struct if_settings; + 
+struct ifmap; + +struct neigh_parms; + +struct rtnl_link_stats64; + +struct ifla_vf_info; + +struct ifla_vf_stats; + +struct nlattr; + +struct ifla_vf_guid; + +struct netlink_ext_ack; + +struct ndmsg; + +struct nlmsghdr; + +struct netlink_callback; + +struct netdev_phys_item_id; + +struct netdev_bpf; + +struct xdp_frame; + +struct xdp_buff; + +struct ip_tunnel_parm; + +struct net_device_path_ctx; + +struct net_device_path; + +struct skb_shared_hwtstamps; + +struct kernel_hwtstamp_config; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, + netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); + int (*ndo_siocwandev)(struct net_device *, struct if_settings *); + int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, + void __attribute__((btf_type_tag("user"))) *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats *(*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, + struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, + struct netlink_ext_ack *); + int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device *(*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + struct net_device *(*ndo_sk_get_lower_dev)(struct net_device 
*, struct sock *); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, + const unsigned char *, u16, u16, struct netlink_ext_ack *); + int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, + const unsigned char *, u16, struct netlink_ext_ack *); + int (*ndo_fdb_del_bulk)(struct nlmsghdr *, struct net_device *, + struct netlink_ext_ack *); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, + struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, + const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, + struct netlink_ext_ack *); + int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); + int (*ndo_mdb_get)(struct net_device *, struct nlattr **, u32, u32, + struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, + struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); + int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void *(*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *, struct xdp_buff *); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm *, int); + struct net_device *(*ndo_get_peer_dev)(struct net_device *); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); + ktime_t (*ndo_get_tstamp)(struct net_device *, + const struct skb_shared_hwtstamps *, bool); + int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); + int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, + struct netlink_ext_ack *); +}; + +struct ifmap { + unsigned long mem_start; + unsigned long mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +typedef struct { + unsigned short encoding; + unsigned short parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + unsigned short lmi; + unsigned short dce; +} 
fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned short dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + unsigned short loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + unsigned short loopback; + unsigned int slot_map; +} te1_settings; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto __attribute__((btf_type_tag("user"))) * raw_hdlc; + cisco_proto __attribute__((btf_type_tag("user"))) * cisco; + fr_proto __attribute__((btf_type_tag("user"))) * fr; + fr_proto_pvc __attribute__((btf_type_tag("user"))) * fr_pvc; + fr_proto_pvc_info __attribute__((btf_type_tag("user"))) * fr_pvc_info; + x25_hdlc_proto __attribute__((btf_type_tag("user"))) * x25; + sync_serial_settings __attribute__((btf_type_tag("user"))) * sync; + te1_settings __attribute__((btf_type_tag("user"))) * te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void __attribute__((btf_type_tag("user"))) * ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct neigh_table; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + u32 qlen; + int data[14]; + unsigned long data_state[1]; +}; + +struct fib_rule; + +struct flowi; + +struct fib_lookup_arg; + +struct fib_rule_hdr; + +struct fib_rules_ops { + int family; + struct list_head list; + int rule_size; + int addr_size; + int unresolved_rules; + int nr_goto_rules; + unsigned int fib_rules_seq; + int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); + bool (*suppress)(struct fib_rule *, int, struct fib_lookup_arg *); + int (*match)(struct fib_rule *, struct flowi *, int); + int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, + struct nlattr **, struct netlink_ext_ack *); + int (*delete)(struct fib_rule *); + int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); + int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); + size_t (*nlmsg_payload)(struct fib_rule *); + void (*flush_cache)(struct fib_rules_ops *); + int nlgroup; + struct list_head rules_list; + struct module *owner; + struct net *fro_net; + struct callback_head rcu; +}; + +typedef __u64 __be64; + +struct fib_kuid_range { + kuid_t start; + kuid_t end; +}; + +struct fib_rule_port_range { + __u16 start; + __u16 end; +}; + +struct fib_rule { + struct list_head list; + int iifindex; + int oifindex; + u32 mark; + u32 mark_mask; + u32 flags; + u32 table; + u8 action; + u8 l3mdev; + u8 proto; + u8 ip_proto; + u32 target; + __be64 tun_id; + struct fib_rule __attribute__((btf_type_tag("rcu"))) * ctarget; + struct net *fr_net; + refcount_t refcnt; + u32 pref; + int 
suppress_ifgroup; + int suppress_prefixlen; + char iifname[16]; + char oifname[16]; + struct fib_kuid_range uid_range; + struct fib_rule_port_range sport_range; + struct fib_rule_port_range dport_range; + struct callback_head rcu; +}; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + int flowic_l3mdev; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + } u; +}; + +struct fib_lookup_arg { + void *lookup_ptr; + const void *lookup_data; + void *result; + struct fib_rule *rule; + u32 table; + int flags; +}; + +struct fib_rule_hdr { + __u8 family; + __u8 dst_len; + __u8 src_len; + __u8 tos; + __u8 table; + __u8 res1; + __u8 res2; + __u8 action; + __u32 flags; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct nla_policy; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + const struct nlattr *miss_nest; + u16 miss_type; + u8 cookie[20]; + u8 cookie_len; + char _msg_buf[80]; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + u16 strict_start_type; + const u32 bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + }; +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + unsigned long hh_data[16]; +}; + +struct neigh_ops; + +struct neighbour { + struct neighbour __attribute__((btf_type_tag("rcu"))) * next; + struct neigh_table *tbl; + struct neigh_parms *parms; + unsigned long confirmed; + unsigned long updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + unsigned long used; + atomic_t probes; + u8 nud_state; + u8 type; + u8 dead; + u8 protocol; + u32 flags; + seqlock_t ha_lock; + long : 0; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct list_head managed_list; + struct callback_head rcu; + struct net_device *dev; + netdevice_tracker dev_tracker; + u8 
primary_key[0]; +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + int (*is_multicast)(const void *); + bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + unsigned long last_flush; + struct delayed_work gc_work; + struct delayed_work managed_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + struct list_head managed_list; + rwlock_t lock; + unsigned long last_rand; + struct neigh_statistics __attribute__((btf_type_tag("percpu"))) * stats; + struct neigh_hash_table __attribute__((btf_type_tag("rcu"))) * nht; + struct pneigh_entry **phash_buckets; +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; + u32 key[0]; +}; + +struct neigh_statistics { + unsigned long allocs; + unsigned long destroys; + unsigned long hash_grows; + unsigned long res_failed; + unsigned long lookups; + unsigned long hits; + unsigned long rcv_probes_mcast; + unsigned long rcv_probes_ucast; + unsigned long periodic_gc_runs; + unsigned long forced_gc_runs; + unsigned long unres_discards; + unsigned long table_fulls; +}; + +struct neigh_hash_table { + struct neighbour __attribute__((btf_type_tag("rcu"))) * *hash_buckets; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __s32 forwarding; + __s32 hop_limit; + __s32 mtu6; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_min_lft; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + __s32 accept_ra_rtr_pref; + __s32 rtr_probe_interval; + __s32 accept_ra_rt_info_min_plen; + __s32 accept_ra_rt_info_max_plen; + __s32 proxy_ndp; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 optimistic_dad; + __s32 use_optimistic; + atomic_t mc_forwarding; + __s32 disable_ipv6; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 
drop_unsolicited_na; + __s32 accept_untracked_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __s32 seg6_require_hmac; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 disable_policy; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; + __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; + struct ctl_table_header *sysctl_header; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; + __u64 rx_otherhost_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_guid { + __u32 vf; + __u64 guid; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + bool strict_check; + union { + u8 ctx[48]; + long args[6]; + }; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct bpf_offloaded_map; + +struct xsk_buff_pool; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + u8 daddr[6]; + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[2]; +}; + +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN = 1, + DEV_PATH_BRIDGE = 2, + DEV_PATH_PPPOE = 3, + DEV_PATH_DSA = 4, + DEV_PATH_MTK_WDMA = 5, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[6]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP = 0, + DEV_PATH_BR_VLAN_TAG = 1, + DEV_PATH_BR_VLAN_UNTAG = 2, + DEV_PATH_BR_VLAN_UNTAG_HW = 3, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; + } bridge; + struct { + int port; + u16 
proto; + } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + u8 amsdu; + } mtk_wdma; + }; +}; + +struct skb_shared_hwtstamps { + union { + ktime_t hwtstamp; + void *netdev_data; + }; +}; + +enum hwtstamp_source { + HWTSTAMP_SOURCE_NETDEV = 0, + HWTSTAMP_SOURCE_PHYLIB = 1, +}; + +struct kernel_hwtstamp_config { + int flags; + int tx_type; + int rx_filter; + struct ifreq *ifr; + bool copied_to_user; + enum hwtstamp_source source; +}; + +enum xdp_rss_hash_type { + XDP_RSS_L3_IPV4 = 1, + XDP_RSS_L3_IPV6 = 2, + XDP_RSS_L3_DYNHDR = 4, + XDP_RSS_L4 = 8, + XDP_RSS_L4_TCP = 16, + XDP_RSS_L4_UDP = 32, + XDP_RSS_L4_SCTP = 64, + XDP_RSS_L4_IPSEC = 128, + XDP_RSS_TYPE_NONE = 0, + XDP_RSS_TYPE_L2 = 0, + XDP_RSS_TYPE_L3_IPV4 = 1, + XDP_RSS_TYPE_L3_IPV6 = 2, + XDP_RSS_TYPE_L3_IPV4_OPT = 5, + XDP_RSS_TYPE_L3_IPV6_EX = 6, + XDP_RSS_TYPE_L4_ANY = 8, + XDP_RSS_TYPE_L4_IPV4_TCP = 25, + XDP_RSS_TYPE_L4_IPV4_UDP = 41, + XDP_RSS_TYPE_L4_IPV4_SCTP = 73, + XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, + XDP_RSS_TYPE_L4_IPV6_TCP = 26, + XDP_RSS_TYPE_L4_IPV6_UDP = 42, + XDP_RSS_TYPE_L4_IPV6_SCTP = 74, + XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, + XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, + XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, + XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, +}; + +struct xdp_md; + +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); + int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); +}; + +struct net_device_core_stats { + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long rx_nohandler; + unsigned long rx_otherhost_dropped; +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_drvinfo; + +struct ethtool_regs; + +struct ethtool_wolinfo; + +struct ethtool_link_ext_state_info; + +struct ethtool_link_ext_stats; + +struct ethtool_eeprom; + +struct ethtool_coalesce; + +struct kernel_ethtool_coalesce; + +struct ethtool_ringparam; + +struct kernel_ethtool_ringparam; + +struct ethtool_pause_stats; + +struct ethtool_pauseparam; + +struct ethtool_test; + +struct ethtool_stats; + +struct ethtool_rxnfc; + +struct ethtool_flash; + +struct ethtool_channels; + +struct ethtool_dump; + +struct ethtool_ts_info; + +struct ethtool_modinfo; + +struct ethtool_eee; + +struct ethtool_tunable; + +struct ethtool_link_ksettings; + +struct ethtool_fec_stats; + +struct ethtool_fecparam; + +struct ethtool_module_eeprom; + +struct ethtool_eth_phy_stats; + +struct ethtool_eth_mac_stats; + +struct ethtool_eth_ctrl_stats; + +struct ethtool_rmon_stats; + +struct ethtool_rmon_hist_range; + +struct ethtool_module_power_mode_params; + +struct ethtool_mm_state; + +struct ethtool_mm_cfg; + +struct ethtool_mm_stats; + +struct ethtool_ops { + u32 cap_link_lanes_supported : 1; + u32 supported_coalesce_params; + u32 supported_ring_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); + int 
(*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, + struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, + struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); + int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8); + int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32); + int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, + u32 *, bool); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_eee *); + int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); + int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void 
(*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, + const void *); + int (*get_module_eeprom_by_page)(struct net_device *, + const struct ethtool_module_eeprom *, + struct netlink_ext_ack *); + void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); + void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); + void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); + void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, + const struct ethtool_rmon_hist_range **); + int (*get_module_power_mode)(struct net_device *, + struct ethtool_module_power_mode_params *, + struct netlink_ext_ack *); + int (*set_module_power_mode)(struct net_device *, + const struct ethtool_module_power_mode_params *, + struct netlink_ext_ack *); + int (*get_mm)(struct net_device *, struct ethtool_mm_state *); + int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); + void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); +}; + +struct l3mdev_ops { + u32 (*l3mdev_fib_table)(const struct net_device *); + struct sk_buff *(*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16); + struct sk_buff *(*l3mdev_l3_out)(struct net_device *, struct sock *, + struct sk_buff *, u16); + struct dst_entry *(*l3mdev_link_scope_lookup)(const struct net_device *, + struct flowi6 *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*is_useropt)(u8); + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, + struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, + const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, + const struct prefix_info *, struct inet6_dev *, + struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); +}; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, + const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, + const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct ipv4_devconf { + void *sysctl; + int data[33]; + unsigned long state[1]; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + netdevice_tracker dev_tracker; + refcount_t refcnt; + int dead; + struct in_ifaddr __attribute__((btf_type_tag("rcu"))) * ifa_list; + struct ip_mc_list __attribute__((btf_type_tag("rcu"))) * mc_list; + struct ip_mc_list + __attribute__((btf_type_tag("rcu"))) *__attribute__((btf_type_tag("rcu"))) * + mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + unsigned long mr_v1_seen; + unsigned long mr_v2_seen; + unsigned long mr_maxdelay; + unsigned long mr_qi; + unsigned long mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; + struct 
timer_list mr_ifc_timer; + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct icmpv6_mib_device; + +struct icmpv6msg_mib_device; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib __attribute__((btf_type_tag("percpu"))) * ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head addr_list; + struct ifmcaddr6 __attribute__((btf_type_tag("rcu"))) * mc_list; + struct ifmcaddr6 __attribute__((btf_type_tag("rcu"))) * mc_tomb; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + unsigned long mc_v1_seen; + unsigned long mc_qi; + unsigned long mc_qri; + unsigned long mc_maxdelay; + struct delayed_work mc_gq_work; + struct delayed_work mc_ifc_work; + struct delayed_work mc_dad_work; + struct delayed_work mc_query_work; + struct delayed_work mc_report_work; + struct sk_buff_head mc_query_queue; + struct sk_buff_head mc_report_queue; + spinlock_t mc_query_lock; + spinlock_t mc_report_lock; + struct mutex mc_lock; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + unsigned long tstamp; + struct callback_head rcu; + unsigned int ra_mtu; +}; + +struct ip6_sf_list; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev *idev; + struct ifmcaddr6 __attribute__((btf_type_tag("rcu"))) * next; + struct ip6_sf_list __attribute__((btf_type_tag("rcu"))) * mca_sources; + struct ip6_sf_list __attribute__((btf_type_tag("rcu"))) * mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + unsigned long mca_sfcount[2]; + struct delayed_work mca_work; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + unsigned long mca_cstamp; + unsigned long mca_tstamp; + struct callback_head rcu; +}; + +struct ip6_sf_list { + struct ip6_sf_list __attribute__((btf_type_tag("rcu"))) * sf_next; + struct in6_addr sf_addr; + unsigned long sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; + struct callback_head rcu; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + unsigned long aca_cstamp; + unsigned long aca_tstamp; + struct callback_head rcu; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[7]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct bpf_mprog_fp { + struct bpf_prog *prog; +}; + +struct bpf_mprog_bundle; + +struct bpf_mprog_entry { + struct bpf_mprog_fp fp_items[64]; + struct bpf_mprog_bundle *parent; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + unsigned long slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int 
slack_hold_time; + long : 64; + long : 64; +}; + +struct netdev_queue { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct Qdisc __attribute__((btf_type_tag("rcu"))) * qdisc; + struct Qdisc __attribute__((btf_type_tag("rcu"))) * qdisc_sleeping; + struct kobject kobj; + int numa_node; + unsigned long tx_maxrate; + atomic_long_t trans_timeout; + struct net_device *sb_dev; + struct xsk_buff_pool *pool; + spinlock_t _xmit_lock; + int xmit_lock_owner; + unsigned long trans_start; + unsigned long state; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct dql dql; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct gnet_stats_basic_sync { + u64_stats_t bytes; + u64_stats_t packets; + struct u64_stats_sync syncp; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff *(*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table __attribute__((btf_type_tag("rcu"))) * stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator __attribute__((btf_type_tag("rcu"))) * rate_est; + struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) * cpu_bstats; + struct gnet_stats_queue __attribute__((btf_type_tag("percpu"))) * cpu_qstats; + int pad; + refcount_t refcnt; + long : 64; + long : 64; + long : 64; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_sync bstats; + struct gnet_stats_queue qstats; + unsigned long state; + unsigned long state2; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + spinlock_t busylock; + spinlock_t seqlock; + struct callback_head rcu; + netdevice_tracker dev_tracker; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long privdata[0]; +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + struct bpf_prog *xdp_prog; + unsigned int count; +}; + +struct xps_map; + +struct xps_dev_maps { + struct callback_head rcu; + unsigned int nr_ids; + s16 num_tc; + struct xps_map __attribute__((btf_type_tag("rcu"))) * attr_map[0]; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; +}; + +struct pcpu_sw_netstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + struct u64_stats_sync syncp; +}; + +struct pcpu_dstats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_drops; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; + struct u64_stats_sync syncp; + long : 64; + long : 64; +}; + +struct dm_hw_stat_delta { + unsigned long last_rx; + unsigned long last_drop_val; + struct callback_head rcu; +}; + +struct rtnl_link_ops { + struct list_head list; + const char *kind; + size_t priv_size; + struct net_device *(*alloc)(struct nlattr **, const char *, unsigned char, + unsigned int, unsigned int); + void (*setup)(struct net_device *); + bool netns_refund; + 
unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, + struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, + struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const struct net_device *); + int (*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device *); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, + struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, + const struct net_device *); + struct net *(*get_link_net)(const struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct netprio_map { + struct callback_head rcu; + u32 priomap_len; + u32 priomap[0]; +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_info; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, + struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, + struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + unsigned long flags; + struct callback_head rcu; + long : 64; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short type; + unsigned long flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long : 64; + long : 64; + long : 64; + struct socket_wq wq; +}; + +typedef struct { + size_t written; + size_t count; + union { + char __attribute__((btf_type_tag("user"))) * buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); + +struct proto_ops { + int family; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, int, bool); + int (*getname)(struct socket *, struct sockaddr *, int); + __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, 
unsigned int, unsigned long); + int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); + int (*gettstamp)(struct socket *, void __attribute__((btf_type_tag("user"))) *, + bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, + char __attribute__((btf_type_tag("user"))) *, + int __attribute__((btf_type_tag("user"))) *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket *, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, + size_t, unsigned int); + void (*splice_eof)(struct socket *); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*read_skb)(struct sock *, skb_read_actor_t); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie : 1; + u8 num_timeout : 7; + u32 ts_recent; + struct timer_list rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; + u32 timeout; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + int (*twsk_unique)(struct sock *, struct sock *, void *); + void (*twsk_destructor)(struct sock *); +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + u16 num_closed_socks; + u16 incoming_cpu; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany : 1; + unsigned int has_conns : 1; + struct bpf_prog __attribute__((btf_type_tag("rcu"))) * prog; + struct sock *socks[0]; +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_Generic = 253, + Root_RAM0 = 1048576, +}; + +enum pageflags { + PG_locked = 0, + PG_writeback = 1, + PG_referenced = 2, + PG_uptodate = 3, + PG_dirty = 4, + PG_lru = 5, + PG_head = 6, + PG_waiters = 7, + PG_active = 8, + PG_workingset = 9, + PG_error = 10, + PG_slab = 11, + PG_owner_priv_1 = 12, + PG_arch_1 = 13, + PG_reserved = 14, + PG_private = 15, + PG_private_2 = 16, + PG_mappedtodisk = 17, + PG_reclaim = 18, + PG_swapbacked = 19, + PG_unevictable = 20, + PG_mlocked = 21, + PG_uncached = 22, + PG_hwpoison = 23, + PG_young = 24, + PG_idle = 25, + __NR_PAGEFLAGS = 26, + PG_readahead = 18, + PG_anon_exclusive = 17, + PG_checked = 12, + PG_swapcache = 12, + PG_fscache = 16, + PG_pinned = 
12, + PG_savepinned = 4, + PG_foreign = 12, + PG_xen_remapped = 12, + PG_isolated = 18, + PG_reported = 3, + PG_vmemmap_self_hosted = 12, + PG_has_hwpoisoned = 10, + PG_hugetlb = 8, + PG_large_rmappable = 9, +}; + +typedef u64 async_cookie_t; + +enum state { + Start = 0, + Collect = 1, + GotHeader = 2, + SkipIt = 3, + GotName = 4, + CopyFile = 5, + GotSymlink = 6, + Reset = 7, +}; + +struct hash { + int ino; + int minor; + int major; + umode_t mode; + struct hash *next; + char name[4098]; +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +struct dir_entry { + struct list_head list; + time64_t mtime; + char name[0]; +}; + +typedef void (*async_func_t)(void *, async_cookie_t); + +typedef int (*decompress_fn)(unsigned char *, long, long (*)(void *, unsigned long), + long (*)(void *, unsigned long), unsigned char *, long *, + void (*)(char *)); + +enum cc_vendor { + CC_VENDOR_NONE = 0, + CC_VENDOR_AMD = 1, + CC_VENDOR_INTEL = 2, +}; + +enum cc_attr { + CC_ATTR_MEM_ENCRYPT = 0, + CC_ATTR_HOST_MEM_ENCRYPT = 1, + CC_ATTR_GUEST_MEM_ENCRYPT = 2, + CC_ATTR_GUEST_STATE_ENCRYPT = 3, + CC_ATTR_GUEST_UNROLL_STRING_IO = 4, + CC_ATTR_GUEST_SEV_SNP = 5, + CC_ATTR_HOTPLUG_DISABLED = 6, +}; + +typedef long (*sys_call_ptr_t)(const struct pt_regs *); + +struct io_bitmap { + u64 sequence; + refcount_t refcnt; + unsigned int max; + unsigned long bitmap[1024]; +}; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +struct irqentry_state { + union { + bool exit_rcu; + bool lockdep; + }; +}; + +typedef struct irqentry_state irqentry_state_t; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, + struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_COMPLETED = 16384, + VM_FAULT_HINDEX_MASK = 983040, +}; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_TSC = 1, + VDSO_CLOCKMODE_PVCLOCK = 2, + VDSO_CLOCKMODE_HVCLOCK = 3, + VDSO_CLOCKMODE_MAX = 4, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +struct arch_vdso_data {}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_data arch_data; +}; + +struct alt_instr { + s32 instr_offset; + s32 repl_offset; + union { + struct { + u32 cpuid : 16; + u32 flags : 16; + }; + u32 ft_flags; + }; + u8 instrlen; + u8 
replacementlen; +} __attribute__((packed)); + +struct maple_enode; + +struct maple_alloc; + +struct ma_state { + struct maple_tree *tree; + unsigned long index; + unsigned long last; + struct maple_enode *node; + unsigned long min; + unsigned long max; + struct maple_alloc *alloc; + unsigned char depth; + unsigned char offset; + unsigned char mas_flags; +}; + +struct vma_iterator { + struct ma_state mas; +}; + +struct maple_alloc { + unsigned long total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[30]; +}; + +typedef unsigned int zap_flags_t; + +struct zap_details { + struct folio *single_folio; + bool even_cows; + zap_flags_t zap_flags; +}; + +struct vdso_exception_table_entry { + int insn; + int fixup; +}; + +typedef void (*btf_trace_emulate_vsyscall)(void *, int); + +enum { + EMULATE = 0, + XONLY = 1, + NONE = 2, +}; + +enum x86_pf_error_code { + X86_PF_PROT = 1, + X86_PF_WRITE = 2, + X86_PF_USER = 4, + X86_PF_RSVD = 8, + X86_PF_INSTR = 16, + X86_PF_PK = 32, + X86_PF_SHSTK = 64, + X86_PF_SGX = 32768, +}; + +enum fixed_addresses { + VSYSCALL_PAGE = 511, + FIX_DBGP_BASE = 512, + FIX_EARLYCON_MEM_BASE = 513, + FIX_OHCI1394_BASE = 514, + FIX_APIC_BASE = 515, + FIX_IO_APIC_BASE_0 = 516, + FIX_IO_APIC_BASE_END = 643, + FIX_APEI_GHES_IRQ = 644, + FIX_APEI_GHES_NMI = 645, + __end_of_permanent_fixed_addresses = 646, + FIX_BTMAP_END = 1024, + FIX_BTMAP_BEGIN = 1535, + __end_of_fixed_addresses = 1536, +}; + +enum syscall_work_bit { + SYSCALL_WORK_BIT_SECCOMP = 0, + SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT = 1, + SYSCALL_WORK_BIT_SYSCALL_TRACE = 2, + SYSCALL_WORK_BIT_SYSCALL_EMU = 3, + SYSCALL_WORK_BIT_SYSCALL_AUDIT = 4, + SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH = 5, + SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP = 6, +}; + +struct trace_event_raw_emulate_vsyscall { + struct trace_entry ent; + int nr; + char __data[0]; +}; + +typedef unsigned long p4dval_t; + +typedef struct { + p4dval_t p4d; +} p4d_t; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct trace_event_data_offsets_emulate_vsyscall {}; + +struct perf_guest_switch_msr { + unsigned int msr; + u64 host; + u64 guest; +}; + +struct event_constraint; + +struct er_account; + +struct intel_shared_regs; + +struct intel_excl_cntrs; + +struct amd_nb; + +struct cpu_hw_events { + struct perf_event *events[64]; + unsigned long active_mask[1]; + unsigned long dirty[1]; + int enabled; + int n_events; + int n_added; + int n_txn; + int n_txn_pair; + int n_txn_metric; + int assign[64]; + u64 tags[64]; + struct perf_event *event_list[64]; + struct event_constraint *event_constraint[64]; + int n_excl; + unsigned int txn_flags; + int is_fake; + struct debug_store *ds; + void *ds_pebs_vaddr; + void *ds_bts_vaddr; + u64 pebs_enabled; + int n_pebs; + int n_large_pebs; + int n_pebs_via_pt; + int pebs_output; + u64 pebs_data_cfg; + u64 active_pebs_data_cfg; + int pebs_record_size; + u64 fixed_ctrl_val; + u64 active_fixed_ctrl_val; + int lbr_users; + int lbr_pebs_users; + struct perf_branch_stack lbr_stack; + struct perf_branch_entry lbr_entries[32]; + union { + struct er_account *lbr_sel; + struct er_account *lbr_ctl; + }; + u64 br_sel; + void *last_task_ctx; + int last_log_id; + int lbr_select; + void *lbr_xsave; + u64 intel_ctrl_guest_mask; + u64 intel_ctrl_host_mask; + struct perf_guest_switch_msr guest_switch_msrs[64]; + u64 intel_cp_status; + struct intel_shared_regs *shared_regs; + struct event_constraint *constraint_list; + struct intel_excl_cntrs *excl_cntrs; + int 
excl_thread_id; + u64 tfa_shadow; + int n_metric; + struct amd_nb *amd_nb; + int brs_active; + u64 perf_ctr_virt_mask; + int n_pair; + void *kfree_on_online[2]; + struct pmu *pmu; +}; + +struct event_constraint { + union { + unsigned long idxmsk[1]; + u64 idxmsk64; + }; + u64 code; + u64 cmask; + int weight; + int overlap; + int flags; + unsigned int size; +}; + +struct er_account { + raw_spinlock_t lock; + u64 config; + u64 reg; + atomic_t ref; +}; + +struct intel_shared_regs { + struct er_account regs[7]; + int refcnt; + unsigned int core_id; +}; + +enum intel_excl_state_type { + INTEL_EXCL_UNUSED = 0, + INTEL_EXCL_SHARED = 1, + INTEL_EXCL_EXCLUSIVE = 2, +}; + +struct intel_excl_states { + enum intel_excl_state_type state[64]; + bool sched_started; +}; + +struct intel_excl_cntrs { + raw_spinlock_t lock; + struct intel_excl_states states[2]; + union { + u16 has_exclusive[2]; + u32 exclusive_present; + }; + int refcnt; + unsigned int core_id; +}; + +struct amd_nb { + int nb_id; + int refcnt; + struct perf_event *owners[64]; + struct event_constraint event_constraints[64]; +}; + +union perf_capabilities { + struct { + u64 lbr_format : 6; + u64 pebs_trap : 1; + u64 pebs_arch_reg : 1; + u64 pebs_format : 4; + u64 smm_freeze : 1; + u64 full_width_write : 1; + u64 pebs_baseline : 1; + u64 perf_metrics : 1; + u64 pebs_output_pt_available : 1; + u64 pebs_timing_info : 1; + u64 anythread_deprecated : 1; + }; + u64 capabilities; +}; + +enum hybrid_cpu_type { + HYBRID_INTEL_NONE = 0, + HYBRID_INTEL_ATOM = 32, + HYBRID_INTEL_CORE = 64, +}; + +struct x86_pmu_quirk; + +struct extra_reg; + +struct x86_hybrid_pmu; + +struct x86_pmu { + const char *name; + int version; + int (*handle_irq)(struct pt_regs *); + void (*disable_all)(); + void (*enable_all)(int); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); + void (*assign)(struct perf_event *, int); + void (*add)(struct perf_event *); + void (*del)(struct perf_event *); + void (*read)(struct perf_event *); + int (*set_period)(struct perf_event *); + u64 (*update)(struct perf_event *); + int (*hw_config)(struct perf_event *); + int (*schedule_events)(struct cpu_hw_events *, int, int *); + unsigned int eventsel; + unsigned int perfctr; + int (*addr_offset)(int, bool); + int (*rdpmc_index)(int); + u64 (*event_map)(int); + int max_events; + int num_counters; + int num_counters_fixed; + int cntval_bits; + u64 cntval_mask; + union { + unsigned long events_maskl; + unsigned long events_mask[1]; + }; + int events_mask_len; + int apic; + u64 max_period; + struct event_constraint *(*get_event_constraints)(struct cpu_hw_events *, int, + struct perf_event *); + void (*put_event_constraints)(struct cpu_hw_events *, struct perf_event *); + void (*start_scheduling)(struct cpu_hw_events *); + void (*commit_scheduling)(struct cpu_hw_events *, int, int); + void (*stop_scheduling)(struct cpu_hw_events *); + struct event_constraint *event_constraints; + struct x86_pmu_quirk *quirks; + void (*limit_period)(struct perf_event *, s64 *); + unsigned int late_ack : 1; + unsigned int mid_ack : 1; + unsigned int enabled_ack : 1; + int attr_rdpmc_broken; + int attr_rdpmc; + struct attribute **format_attrs; + ssize_t (*events_sysfs_show)(char *, u64); + const struct attribute_group **attr_update; + unsigned long attr_freeze_on_smi; + int (*cpu_prepare)(int); + void (*cpu_starting)(int); + void (*cpu_dying)(int); + void (*cpu_dead)(int); + void (*check_microcode)(); + void (*sched_task)(struct perf_event_pmu_context *, bool); + u64 intel_ctrl; + union 
perf_capabilities intel_cap; + unsigned int bts : 1; + unsigned int bts_active : 1; + unsigned int pebs : 1; + unsigned int pebs_active : 1; + unsigned int pebs_broken : 1; + unsigned int pebs_prec_dist : 1; + unsigned int pebs_no_tlb : 1; + unsigned int pebs_no_isolation : 1; + unsigned int pebs_block : 1; + unsigned int pebs_ept : 1; + int pebs_record_size; + int pebs_buffer_size; + int max_pebs_events; + void (*drain_pebs)(struct pt_regs *, struct perf_sample_data *); + struct event_constraint *pebs_constraints; + void (*pebs_aliases)(struct perf_event *); + u64 (*pebs_latency_data)(struct perf_event *, u64); + unsigned long large_pebs_flags; + u64 rtm_abort_event; + u64 pebs_capable; + unsigned int lbr_tos; + unsigned int lbr_from; + unsigned int lbr_to; + unsigned int lbr_info; + unsigned int lbr_nr; + union { + u64 lbr_sel_mask; + u64 lbr_ctl_mask; + }; + union { + const int *lbr_sel_map; + int *lbr_ctl_map; + }; + bool lbr_double_abort; + bool lbr_pt_coexist; + unsigned int lbr_has_info : 1; + unsigned int lbr_has_tsx : 1; + unsigned int lbr_from_flags : 1; + unsigned int lbr_to_cycles : 1; + unsigned int lbr_depth_mask : 8; + unsigned int lbr_deep_c_reset : 1; + unsigned int lbr_lip : 1; + unsigned int lbr_cpl : 1; + unsigned int lbr_filter : 1; + unsigned int lbr_call_stack : 1; + unsigned int lbr_mispred : 1; + unsigned int lbr_timed_lbr : 1; + unsigned int lbr_br_type : 1; + void (*lbr_reset)(); + void (*lbr_read)(struct cpu_hw_events *); + void (*lbr_save)(void *); + void (*lbr_restore)(void *); + atomic_t lbr_exclusive[3]; + int num_topdown_events; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + unsigned int amd_nb_constraints : 1; + u64 perf_ctr_pair_en; + struct extra_reg *extra_regs; + unsigned int flags; + struct perf_guest_switch_msr *(*guest_get_msrs)(int *, void *); + int (*check_period)(struct perf_event *, u64); + int (*aux_output_match)(struct perf_event *); + void (*filter)(struct pmu *, int, bool *); + int num_hybrid_pmus; + struct x86_hybrid_pmu *hybrid_pmu; + enum hybrid_cpu_type (*get_hybrid_cpu_type)(); +}; + +struct x86_pmu_quirk { + struct x86_pmu_quirk *next; + void (*func)(); +}; + +struct extra_reg { + unsigned int event; + unsigned int msr; + u64 config_mask; + u64 valid_mask; + int idx; + bool extra_msr_access; +}; + +enum hybrid_pmu_type { + not_hybrid = 0, + hybrid_small = 1, + hybrid_big = 2, + hybrid_big_small = 3, +}; + +struct x86_hybrid_pmu { + struct pmu pmu; + const char *name; + enum hybrid_pmu_type pmu_type; + cpumask_t supported_cpus; + union perf_capabilities intel_cap; + u64 intel_ctrl; + int max_pebs_events; + int num_counters; + int num_counters_fixed; + struct event_constraint unconstrained; + u64 hw_cache_event_ids[42]; + u64 hw_cache_extra_regs[42]; + struct event_constraint *event_constraints; + struct event_constraint *pebs_constraints; + struct extra_reg *extra_regs; + unsigned int late_ack : 1; + unsigned int mid_ack : 1; + unsigned int enabled_ack : 1; + u64 pebs_data_source[16]; +}; + +typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); + +struct nmiaction { + struct list_head list; + nmi_handler_t handler; + u64 max_duration; + unsigned long flags; + const char *name; +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +}; + +enum { + x86_lbr_exclusive_lbr = 0, + x86_lbr_exclusive_bts = 1, + x86_lbr_exclusive_pt = 2, + x86_lbr_exclusive_max = 3, +}; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, 
+ PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, + PERF_SAMPLE_BRANCH_MAX = 524288, +}; + +enum perf_event_x86_regs { + PERF_REG_X86_AX = 0, + PERF_REG_X86_BX = 1, + PERF_REG_X86_CX = 2, + PERF_REG_X86_DX = 3, + PERF_REG_X86_SI = 4, + PERF_REG_X86_DI = 5, + PERF_REG_X86_BP = 6, + PERF_REG_X86_SP = 7, + PERF_REG_X86_IP = 8, + PERF_REG_X86_FLAGS = 9, + PERF_REG_X86_CS = 10, + PERF_REG_X86_SS = 11, + PERF_REG_X86_DS = 12, + PERF_REG_X86_ES = 13, + PERF_REG_X86_FS = 14, + PERF_REG_X86_GS = 15, + PERF_REG_X86_R8 = 16, + PERF_REG_X86_R9 = 17, + PERF_REG_X86_R10 = 18, + PERF_REG_X86_R11 = 19, + PERF_REG_X86_R12 = 20, + PERF_REG_X86_R13 = 21, + PERF_REG_X86_R14 = 22, + PERF_REG_X86_R15 = 23, + PERF_REG_X86_32_MAX = 16, + PERF_REG_X86_64_MAX = 24, + PERF_REG_X86_XMM0 = 32, + PERF_REG_X86_XMM1 = 34, + PERF_REG_X86_XMM2 = 36, + PERF_REG_X86_XMM3 = 38, + PERF_REG_X86_XMM4 = 40, + PERF_REG_X86_XMM5 = 42, + PERF_REG_X86_XMM6 = 44, + PERF_REG_X86_XMM7 = 46, + PERF_REG_X86_XMM8 = 48, + PERF_REG_X86_XMM9 = 50, + PERF_REG_X86_XMM10 = 52, + PERF_REG_X86_XMM11 = 54, + PERF_REG_X86_XMM12 = 56, + PERF_REG_X86_XMM13 = 58, + PERF_REG_X86_XMM14 = 60, + PERF_REG_X86_XMM15 = 62, + PERF_REG_X86_XMM_MAX = 64, +}; + +enum { + PERF_X86_EVENT_PEBS_LDLAT = 1, + PERF_X86_EVENT_PEBS_ST = 2, + PERF_X86_EVENT_PEBS_ST_HSW = 4, + PERF_X86_EVENT_PEBS_LD_HSW = 8, + PERF_X86_EVENT_PEBS_NA_HSW = 16, + PERF_X86_EVENT_EXCL = 32, + PERF_X86_EVENT_DYNAMIC = 64, + PERF_X86_EVENT_EXCL_ACCT = 256, + PERF_X86_EVENT_AUTO_RELOAD = 512, + PERF_X86_EVENT_LARGE_PEBS = 1024, + PERF_X86_EVENT_PEBS_VIA_PT = 2048, + PERF_X86_EVENT_PAIR = 4096, + PERF_X86_EVENT_LBR_SELECT = 8192, + PERF_X86_EVENT_TOPDOWN = 16384, + PERF_X86_EVENT_PEBS_STLAT = 32768, + PERF_X86_EVENT_AMD_BRS = 65536, + PERF_X86_EVENT_PEBS_LAT_HYBRID = 131072, +}; + +enum stack_type { + STACK_TYPE_UNKNOWN = 0, + STACK_TYPE_TASK = 1, + STACK_TYPE_IRQ = 2, + STACK_TYPE_SOFTIRQ = 3, + STACK_TYPE_ENTRY = 4, + STACK_TYPE_EXCEPTION = 5, + STACK_TYPE_EXCEPTION_LAST = 10, +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + 
PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, +}; + +enum { + NMI_LOCAL = 0, + NMI_UNKNOWN = 1, + NMI_SERR = 2, + NMI_IO_CHECK = 3, + NMI_MAX = 4, +}; + +enum { + X86_PERF_KFREE_SHARED = 0, + X86_PERF_KFREE_EXCL = 1, + X86_PERF_KFREE_MAX = 2, +}; + +enum extra_reg_type { + EXTRA_REG_NONE = -1, + EXTRA_REG_RSP_0 = 0, + EXTRA_REG_RSP_1 = 1, + EXTRA_REG_LBR = 2, + EXTRA_REG_LDLAT = 3, + EXTRA_REG_FE = 4, + EXTRA_REG_SNOOP_0 = 5, + EXTRA_REG_SNOOP_1 = 6, + EXTRA_REG_MAX = 7, +}; + +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + +struct perf_pmu_events_hybrid_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + u64 pmu_type; +}; + +struct stack_frame { + struct stack_frame *next_frame; + unsigned long return_address; +}; + +struct sched_state { + int weight; + int event; + int counter; + int unassigned; + int nr_gp; + u64 used; +}; + +struct perf_sched { + int max_weight; + int max_events; + int max_gp; + int saved_states; + struct event_constraint **constraints; + struct sched_state state; + struct sched_state saved[2]; +}; + +struct cyc2ns_data { + u32 cyc2ns_mul; + u32 cyc2ns_shift; + u64 cyc2ns_offset; +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short contexts; + bool contexts_maxed; +}; + +struct stack_info { + enum stack_type type; + unsigned long *begin; + unsigned long *end; + unsigned long *next_sp; +}; + +struct unwind_state { + struct stack_info stack_info; + unsigned long stack_mask; + struct task_struct *task; + int graph_idx; + struct llist_node *kr_cur; + bool error; + bool signal; + bool full_regs; + unsigned long sp; + unsigned long bp; + unsigned long ip; + struct pt_regs *regs; + struct pt_regs *prev_regs; +}; + +struct stack_frame_ia32 { + u32 next_frame; + u32 return_address; +}; + +typedef void (*smp_call_func_t)(void *); + +typedef bool (*smp_cond_func_t)(int, void *); + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0 : 1; + __u64 cap_bit0_is_deprecated : 1; + __u64 cap_user_rdpmc : 1; + __u64 cap_user_time : 1; + __u64 cap_user_time_zero : 1; + __u64 cap_user_time_short : 1; + __u64 cap_____res : 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct x86_pmu_capability { + int version; + int num_counters_gp; + int num_counters_fixed; + int bit_width_gp; + int 
bit_width_fixed; + unsigned int events_mask; + int events_mask_len; + unsigned int pebs_ept : 1; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct bpf_map_dev_ops; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct Qdisc_class_ops; + +struct gnet_dump; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff *(*dequeue)(struct Qdisc *); + struct sk_buff *(*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + void (*change_real_num_tx)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct tcmsg; + +struct qdisc_walker; + +struct tcf_block; + +struct Qdisc_class_ops { + unsigned int flags; + struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **, + struct netlink_ext_ack *); + struct Qdisc *(*leaf)(struct Qdisc *, unsigned long); + void (*qlen_notify)(struct Qdisc *, unsigned long); + unsigned long (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, unsigned long *, + struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, unsigned long, struct netlink_ext_ack *); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block *(*tcf_block)(struct Qdisc *, unsigned long, struct netlink_ext_ack *); + unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32); + void (*unbind_tcf)(struct Qdisc *, unsigned long); + int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + unsigned short tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct flow_block { + struct list_head cb_list; +}; + +struct tcf_chain; + +struct tcf_block { + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + 
struct hlist_head proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_proto; + +struct tcf_proto_ops; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto __attribute__((btf_type_tag("rcu"))) * filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct tcf_result; + +struct tcf_proto { + struct tcf_proto __attribute__((btf_type_tag("rcu"))) * next; + void __attribute__((btf_type_tag("rcu"))) * root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + bool deleting; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +enum skb_drop_reason { + SKB_NOT_DROPPED_YET = 0, + SKB_CONSUMED = 1, + SKB_DROP_REASON_NOT_SPECIFIED = 2, + SKB_DROP_REASON_NO_SOCKET = 3, + SKB_DROP_REASON_PKT_TOO_SMALL = 4, + SKB_DROP_REASON_TCP_CSUM = 5, + SKB_DROP_REASON_SOCKET_FILTER = 6, + SKB_DROP_REASON_UDP_CSUM = 7, + SKB_DROP_REASON_NETFILTER_DROP = 8, + SKB_DROP_REASON_OTHERHOST = 9, + SKB_DROP_REASON_IP_CSUM = 10, + SKB_DROP_REASON_IP_INHDR = 11, + SKB_DROP_REASON_IP_RPFILTER = 12, + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, + SKB_DROP_REASON_XFRM_POLICY = 14, + SKB_DROP_REASON_IP_NOPROTO = 15, + SKB_DROP_REASON_SOCKET_RCVBUFF = 16, + SKB_DROP_REASON_PROTO_MEM = 17, + SKB_DROP_REASON_TCP_AUTH_HDR = 18, + SKB_DROP_REASON_TCP_MD5NOTFOUND = 19, + SKB_DROP_REASON_TCP_MD5UNEXPECTED = 20, + SKB_DROP_REASON_TCP_MD5FAILURE = 21, + SKB_DROP_REASON_TCP_AONOTFOUND = 22, + SKB_DROP_REASON_TCP_AOUNEXPECTED = 23, + SKB_DROP_REASON_TCP_AOKEYNOTFOUND = 24, + SKB_DROP_REASON_TCP_AOFAILURE = 25, + SKB_DROP_REASON_SOCKET_BACKLOG = 26, + SKB_DROP_REASON_TCP_FLAGS = 27, + SKB_DROP_REASON_TCP_ZEROWINDOW = 28, + SKB_DROP_REASON_TCP_OLD_DATA = 29, + SKB_DROP_REASON_TCP_OVERWINDOW = 30, + SKB_DROP_REASON_TCP_OFOMERGE = 31, + SKB_DROP_REASON_TCP_RFC7323_PAWS = 32, + SKB_DROP_REASON_TCP_OLD_SEQUENCE = 33, + SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 34, + SKB_DROP_REASON_TCP_RESET = 35, + SKB_DROP_REASON_TCP_INVALID_SYN = 36, + SKB_DROP_REASON_TCP_CLOSE = 37, + SKB_DROP_REASON_TCP_FASTOPEN = 38, + SKB_DROP_REASON_TCP_OLD_ACK = 39, + SKB_DROP_REASON_TCP_TOO_OLD_ACK = 40, + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 41, + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 42, + SKB_DROP_REASON_TCP_OFO_DROP = 43, + SKB_DROP_REASON_IP_OUTNOROUTES = 44, + SKB_DROP_REASON_BPF_CGROUP_EGRESS = 45, + SKB_DROP_REASON_IPV6DISABLED = 46, + SKB_DROP_REASON_NEIGH_CREATEFAIL = 47, + SKB_DROP_REASON_NEIGH_FAILED = 48, + SKB_DROP_REASON_NEIGH_QUEUEFULL = 49, + SKB_DROP_REASON_NEIGH_DEAD = 50, + SKB_DROP_REASON_TC_EGRESS = 51, + SKB_DROP_REASON_QDISC_DROP = 52, + SKB_DROP_REASON_CPU_BACKLOG = 53, + SKB_DROP_REASON_XDP = 54, + SKB_DROP_REASON_TC_INGRESS = 55, + SKB_DROP_REASON_UNHANDLED_PROTO = 56, + SKB_DROP_REASON_SKB_CSUM = 57, + SKB_DROP_REASON_SKB_GSO_SEG = 58, + SKB_DROP_REASON_SKB_UCOPY_FAULT = 59, + SKB_DROP_REASON_DEV_HDR = 60, + SKB_DROP_REASON_DEV_READY = 61, + SKB_DROP_REASON_FULL_RING = 62, + SKB_DROP_REASON_NOMEM = 63, + SKB_DROP_REASON_HDR_TRUNC = 64, + SKB_DROP_REASON_TAP_FILTER = 65, + SKB_DROP_REASON_TAP_TXFILTER = 66, + SKB_DROP_REASON_ICMP_CSUM = 67, + SKB_DROP_REASON_INVALID_PROTO = 68, + 
SKB_DROP_REASON_IP_INADDRERRORS = 69, + SKB_DROP_REASON_IP_INNOROUTES = 70, + SKB_DROP_REASON_PKT_TOO_BIG = 71, + SKB_DROP_REASON_DUP_FRAG = 72, + SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 73, + SKB_DROP_REASON_FRAG_TOO_FAR = 74, + SKB_DROP_REASON_TCP_MINTTL = 75, + SKB_DROP_REASON_IPV6_BAD_EXTHDR = 76, + SKB_DROP_REASON_IPV6_NDISC_FRAG = 77, + SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 78, + SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 79, + SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 80, + SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 81, + SKB_DROP_REASON_QUEUE_PURGE = 82, + SKB_DROP_REASON_TC_ERROR = 83, + SKB_DROP_REASON_MAX = 84, + SKB_DROP_REASON_SUBSYS_MASK = 4294901760, +}; + +struct tcf_result { + union { + struct { + unsigned long class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + }; + enum skb_drop_reason drop_reason; +}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct tcf_walker; + +struct tcf_exts; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void *(*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, + u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, + struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, unsigned long, void *, unsigned long); + void *(*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, + struct netlink_ext_ack *); + void (*tmplt_destroy)(void *); + void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); + struct tcf_exts *(*get_exts)(const struct tcf_proto *, u32); + int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, + struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, + struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_sync *bstats; + spinlock_t *stats_lock; + bool running; + struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) * cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + unsigned long next_jiffies; + struct timer_list 
timer; + struct callback_head rcu; +}; + +struct perf_buffer { + refcount_t refcount; + struct callback_head callback_head; + int nr_pages; + int overwrite; + int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long watermark; + long aux_watermark; + spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + unsigned long mmap_locked; + struct user_struct *mmap_user; + long aux_head; + unsigned int aux_nest; + long aux_wakeup; + unsigned long aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + unsigned long aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct perf_msr { + u64 msr; + struct attribute_group *grp; + bool (*test)(int, void *); + bool no_check; + u64 mask; +}; + +enum { + PERF_BR_UNKNOWN = 0, + PERF_BR_COND = 1, + PERF_BR_UNCOND = 2, + PERF_BR_IND = 3, + PERF_BR_CALL = 4, + PERF_BR_IND_CALL = 5, + PERF_BR_RET = 6, + PERF_BR_SYSCALL = 7, + PERF_BR_SYSRET = 8, + PERF_BR_COND_CALL = 9, + PERF_BR_COND_RET = 10, + PERF_BR_ERET = 11, + PERF_BR_IRQ = 12, + PERF_BR_SERROR = 13, + PERF_BR_NO_TX = 14, + PERF_BR_EXTEND_ABI = 15, + PERF_BR_MAX = 16, +}; + +enum { + X86_BR_NONE = 0, + X86_BR_USER = 1, + X86_BR_KERNEL = 2, + X86_BR_CALL = 4, + X86_BR_RET = 8, + X86_BR_SYSCALL = 16, + X86_BR_SYSRET = 32, + X86_BR_INT = 64, + X86_BR_IRET = 128, + X86_BR_JCC = 256, + X86_BR_JMP = 512, + X86_BR_IRQ = 1024, + X86_BR_IND_CALL = 2048, + X86_BR_ABORT = 4096, + X86_BR_IN_TX = 8192, + X86_BR_NO_TX = 16384, + X86_BR_ZERO_CALL = 32768, + X86_BR_CALL_STACK = 65536, + X86_BR_IND_JMP = 131072, + X86_BR_TYPE_SAVE = 262144, +}; + +typedef int insn_value_t; + +typedef unsigned char insn_byte_t; + +struct insn_field { + union { + insn_value_t value; + insn_byte_t bytes[4]; + }; + unsigned char got; + unsigned char nbytes; +}; + +typedef unsigned int insn_attr_t; + +struct insn { + struct insn_field prefixes; + struct insn_field rex_prefix; + struct insn_field vex_prefix; + struct insn_field opcode; + struct insn_field modrm; + struct insn_field sib; + struct insn_field displacement; + union { + struct insn_field immediate; + struct insn_field moffset1; + struct insn_field immediate1; + }; + union { + struct insn_field moffset2; + struct insn_field immediate2; + }; + int emulate_prefix_size; + insn_attr_t attr; + unsigned char opnd_bytes; + unsigned char addr_bytes; + unsigned char length; + unsigned char x86_64; + const insn_byte_t *kaddr; + const insn_byte_t *end_kaddr; + const insn_byte_t *next_byte; +}; + +union cpuid_0x80000022_ebx { + struct { + unsigned int num_core_pmc : 4; + unsigned int lbr_v2_stack_sz : 6; + unsigned int num_df_pmc : 6; + unsigned int num_umc_pmc : 6; + } split; + unsigned int full; +}; + +typedef void amd_pmu_branch_reset_t(); + +enum { + PERF_BR_SPEC_NA = 0, + PERF_BR_SPEC_WRONG_PATH = 1, + PERF_BR_NON_SPEC_CORRECT_PATH = 2, + PERF_BR_SPEC_CORRECT_PATH = 3, + PERF_BR_SPEC_MAX = 4, +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + 
PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 19, +}; + +struct branch_entry { + union { + struct { + u64 ip : 58; + u64 ip_sign_ext : 5; + u64 mispredict : 1; + } split; + u64 full; + } from; + union { + struct { + u64 ip : 58; + u64 ip_sign_ext : 3; + u64 reserved : 1; + u64 spec : 1; + u64 valid : 1; + } split; + u64 full; + } to; +}; + +union amd_debug_extn_cfg { + __u64 val; + struct { + __u64 rsvd0 : 2; + __u64 brsmen : 1; + __u64 rsvd4_3 : 2; + __u64 vb : 1; + __u64 rsvd2 : 10; + __u64 msroff : 4; + __u64 rsvd3 : 4; + __u64 pmc : 3; + __u64 rsvd4 : 37; + }; +}; + +typedef void (*exitcall_t)(); + +typedef u64 uint64_t; + +struct cpu_perf_ibs; + +struct perf_ibs { + struct pmu pmu; + unsigned int msr; + u64 config_mask; + u64 cnt_mask; + u64 enable_mask; + u64 valid_mask; + u64 max_period; + unsigned long offset_mask[1]; + int offset_max; + unsigned int fetch_count_reset_broken : 1; + unsigned int fetch_ignore_if_zero_rip : 1; + struct cpu_perf_ibs __attribute__((btf_type_tag("percpu"))) * pcpu; + u64 (*get_count)(u64); +}; + +struct cpu_perf_ibs { + struct perf_event *event; + unsigned long state[1]; +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum ibs_states { + IBS_ENABLED = 0, + IBS_STARTED = 1, + IBS_STOPPING = 2, + IBS_STOPPED = 3, + IBS_MAX_STATES = 4, +}; + +union ibs_fetch_ctl { + __u64 val; + struct { + __u64 fetch_maxcnt : 16; + __u64 fetch_cnt : 16; + __u64 fetch_lat : 16; + __u64 fetch_en : 1; + __u64 fetch_val : 1; + __u64 fetch_comp : 1; + __u64 ic_miss : 1; + __u64 phy_addr_valid : 1; + __u64 l1tlb_pgsz : 2; + __u64 l1tlb_miss : 1; + __u64 l2tlb_miss : 1; + __u64 rand_en : 1; + __u64 fetch_l2_miss : 1; + __u64 l3_miss_only : 1; + __u64 fetch_oc_miss : 1; + __u64 fetch_l3_miss : 1; + __u64 reserved : 2; + }; +}; + +union ibs_op_ctl { + __u64 val; + struct { + __u64 opmaxcnt : 16; + __u64 l3_miss_only : 1; + __u64 op_en : 1; + __u64 op_val : 1; + __u64 cnt_ctl : 1; + __u64 opmaxcnt_ext : 7; + __u64 reserved0 : 5; + __u64 opcurcnt : 27; + __u64 reserved1 : 5; + }; +}; + +struct perf_ibs_data { + u32 size; + union { + u32 data[0]; + u32 caps; + }; + u64 regs[8]; +}; + +union ibs_op_data3 { + __u64 val; + struct { + __u64 ld_op : 1; + __u64 st_op : 1; + __u64 dc_l1tlb_miss : 1; + __u64 dc_l2tlb_miss : 1; + __u64 dc_l1tlb_hit_2m : 1; + __u64 dc_l1tlb_hit_1g : 1; + __u64 dc_l2tlb_hit_2m : 1; + __u64 dc_miss : 1; + __u64 dc_mis_acc : 1; + __u64 reserved : 4; + __u64 dc_wc_mem_acc : 1; + __u64 dc_uc_mem_acc : 1; + __u64 dc_locked_op : 1; + __u64 dc_miss_no_mab_alloc : 1; + __u64 dc_lin_addr_valid : 1; + __u64 dc_phy_addr_valid : 1; + __u64 dc_l2_tlb_hit_1g : 1; + __u64 l2_miss : 1; + __u64 sw_pf : 1; + __u64 op_mem_width : 4; + __u64 
op_dc_miss_open_mem_reqs : 6; + __u64 dc_miss_lat : 16; + __u64 tlb_refill_lat : 16; + }; +}; + +union ibs_op_data2 { + __u64 val; + struct { + __u64 data_src_lo : 3; + __u64 reserved0 : 1; + __u64 rmt_node : 1; + __u64 cache_hit_st : 1; + __u64 data_src_hi : 2; + __u64 reserved1 : 56; + }; +}; + +union ibs_op_data { + __u64 val; + struct { + __u64 comp_to_ret_ctr : 16; + __u64 tag_to_ret_ctr : 16; + __u64 reserved1 : 2; + __u64 op_return : 1; + __u64 op_brn_taken : 1; + __u64 op_brn_misp : 1; + __u64 op_brn_ret : 1; + __u64 op_rip_invalid : 1; + __u64 op_brn_fuse : 1; + __u64 op_microcode : 1; + __u64 reserved2 : 23; + }; +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +typedef phys_addr_t resource_size_t; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + unsigned long flags; + unsigned long desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +typedef unsigned short pci_dev_flags_t; + +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; +}; + +struct pci_bus; + +struct pci_slot; + +struct aer_stats; + +struct rcec_ea; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_sriov; + +struct pci_p2pdma; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + unsigned short vendor; + unsigned short device; + unsigned short subsystem_vendor; + unsigned short subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u16 aer_cap; + struct aer_stats *aer_stats; + struct rcec_ea *rcec_ea; + struct pci_dev *rcec; + u32 devcap; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss : 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + unsigned long *dma_alias_mask; + struct pci_driver *driver; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + u8 pm_cap; + unsigned int imm_ready : 1; + unsigned int pme_support : 5; + unsigned int pme_poll : 1; + unsigned int d1_support : 1; + unsigned int d2_support : 1; + unsigned int no_d1d2 : 1; + unsigned int no_d3cold : 1; + unsigned int bridge_d3 : 1; + unsigned int d3cold_allowed : 1; + unsigned int mmio_always_on : 1; + unsigned int wakeup_prepared : 1; + unsigned int skip_bus_pm : 1; + unsigned int ignore_hotplug : 1; + unsigned int hotplug_user_indicators : 1; + unsigned int clear_retrain_link : 1; + unsigned int d3hot_delay; + unsigned int d3cold_delay; + struct pcie_link_state *link_state; + u16 l1ss; + unsigned int ltr_path : 1; + unsigned int pasid_no_tlp : 1; + unsigned int eetlp_prefix_path : 1; + pci_channel_state_t error_state; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[17]; + struct resource driver_exclusive_resource; + bool match_driver; + unsigned int transparent : 1; + unsigned int io_window : 1; + unsigned int pref_window : 1; + unsigned int pref_64_window : 1; + unsigned int multifunction : 1; + unsigned int is_busmaster : 1; + unsigned int no_msi : 1; + unsigned int no_64bit_msi : 1; + unsigned int block_cfg_access : 1; + unsigned int broken_parity_status : 1; + unsigned int irq_reroute_variant : 2; + unsigned int msi_enabled : 1; + unsigned int msix_enabled : 1; + unsigned int ari_enabled : 1; + unsigned int ats_enabled : 1; + unsigned int pasid_enabled : 1; + unsigned int pri_enabled : 1; + unsigned int is_managed : 1; + unsigned int is_msi_managed : 1; + 
unsigned int needs_freset : 1; + unsigned int state_saved : 1; + unsigned int is_physfn : 1; + unsigned int is_virtfn : 1; + unsigned int is_hotplug_bridge : 1; + unsigned int shpc_managed : 1; + unsigned int is_thunderbolt : 1; + unsigned int untrusted : 1; + unsigned int external_facing : 1; + unsigned int broken_intx_masking : 1; + unsigned int io_window_1k : 1; + unsigned int irq_managed : 1; + unsigned int non_compliant_bars : 1; + unsigned int is_probed : 1; + unsigned int link_active_reporting : 1; + unsigned int no_vf_scan : 1; + unsigned int no_command_memory : 1; + unsigned int rom_bar_overlap : 1; + unsigned int rom_attr_enabled : 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + spinlock_t pcie_cap_lock; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + struct bin_attribute *res_attr[17]; + struct bin_attribute *res_attr_wc[17]; + u16 ptm_cap; + unsigned int ptm_root : 1; + unsigned int ptm_enabled : 1; + u8 ptm_granularity; + void *msix_base; + raw_spinlock_t msi_lock; + struct pci_vpd vpd; + u16 dpc_cap; + unsigned int dpc_rp_extensions : 1; + u8 dpc_rp_log_size; + union { + struct pci_sriov *sriov; + struct pci_dev *physfn; + }; + u16 ats_cap; + u8 ats_stu; + u16 pri_cap; + u32 pri_reqs_alloc; + unsigned int pasid_required : 1; + u16 pasid_cap; + u16 pasid_features; + struct pci_p2pdma __attribute__((btf_type_tag("rcu"))) * p2pdma; + u16 acs_cap; + phys_addr_t rom; + size_t romlen; + const char *driver_override; + unsigned long priv_flags; + u8 reset_methods[7]; +}; + +typedef unsigned short pci_bus_flags_t; + +struct pci_ops; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + char name[48]; + unsigned short bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added : 1; + unsigned int unsafe_warn : 1; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void *(*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; + unsigned char number; + struct kobject kobj; +}; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_device_id; + +struct pci_error_handlers; + +struct pci_driver { + struct list_head node; + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + int (*sriov_set_msix_vec_count)(struct pci_dev *, int); + u32 (*sriov_get_vf_total_msix)(struct pci_dev *); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + struct device_driver driver; + struct 
pci_dynids dynids; + bool driver_managed_dma; +}; + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t driver_data; + __u32 override_only; +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); + void (*cor_error_detected)(struct pci_dev *); +}; + +struct gen_pool; + +struct pci_p2pdma { + struct gen_pool *pool; + bool p2pmem_published; + struct xarray map_types; +}; + +union amd_uncore_info; + +struct amd_uncore_pmu; + +struct amd_uncore { + union amd_uncore_info *info; + struct amd_uncore_pmu *pmus; + unsigned int num_pmus; + bool init_done; + void (*scan)(struct amd_uncore *, unsigned int); + int (*init)(struct amd_uncore *, unsigned int); + void (*move)(struct amd_uncore *, unsigned int); + void (*free)(struct amd_uncore *, unsigned int); +}; + +union amd_uncore_info { + struct { + u64 aux_data : 32; + u64 num_pmcs : 8; + u64 gid : 8; + u64 cid : 8; + } split; + u64 full; +}; + +struct amd_uncore_ctx; + +struct amd_uncore_pmu { + char name[16]; + int num_counters; + int rdpmc_base; + u32 msr_base; + int group; + cpumask_t active_mask; + struct pmu pmu; + struct amd_uncore_ctx *__attribute__((btf_type_tag("percpu"))) * ctx; +}; + +struct amd_uncore_ctx { + int refcnt; + int cpu; + struct perf_event **events; + struct hlist_node node; +}; + +enum { + UNCORE_TYPE_DF = 0, + UNCORE_TYPE_L3 = 1, + UNCORE_TYPE_UMC = 2, + UNCORE_TYPE_MAX = 3, +}; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +struct amd_iommu_event_desc { + struct device_attribute attr; + const char *event; +}; + +struct amd_iommu; + +struct perf_amd_iommu { + struct list_head list; + struct pmu pmu; + struct amd_iommu *iommu; + char name[16]; + u8 max_banks; + u8 max_counters; + u64 cntr_assign_mask; + raw_spinlock_t lock; +}; + +enum perf_msr_id { + PERF_MSR_TSC = 0, + PERF_MSR_APERF = 1, + PERF_MSR_MPERF = 2, + PERF_MSR_PPERF = 3, + PERF_MSR_SMI = 4, + PERF_MSR_PTSC = 5, + PERF_MSR_IRPERF = 6, + PERF_MSR_THERM = 7, + PERF_MSR_EVENT_MAX = 8, +}; + +struct real_mode_header { + u32 text_start; + u32 ro_end; + u32 trampoline_start; + u32 trampoline_header; + u32 sev_es_trampoline_start; + u32 trampoline_start64; + u32 trampoline_pgd; + u32 wakeup_start; + u32 wakeup_header; + u32 machine_real_restart_asm; + u32 machine_real_restart_seg; +}; + +struct trampoline_header { + u64 start; + u64 efer; + u32 cr4; + u32 flags; + u32 lock; +}; + +struct idt_bits { + u16 ist : 3; + u16 zero : 5; + u16 type : 5; + u16 dpl : 2; + u16 p : 1; +}; + +struct gate_struct { + u16 offset_low; + u16 segment; + struct idt_bits bits; + u16 offset_middle; + u32 offset_high; + u32 reserved; +}; + +typedef struct gate_struct gate_desc; + +struct screen_info { + __u8 orig_x; + __u8 orig_y; + __u16 ext_mem_k; + __u16 orig_video_page; + __u8 orig_video_mode; + __u8 orig_video_cols; + __u8 flags; + __u8 unused2; + __u16 orig_video_ega_bx; + __u16 unused3; + __u8 orig_video_lines; + __u8 orig_video_isVGA; + __u16 orig_video_points; + __u16 lfb_width; + __u16 lfb_height; + __u16 lfb_depth; + __u32 lfb_base; + __u32 lfb_size; + __u16 cl_magic; + __u16 
cl_offset; + __u16 lfb_linelength; + __u8 red_size; + __u8 red_pos; + __u8 green_size; + __u8 green_pos; + __u8 blue_size; + __u8 blue_pos; + __u8 rsvd_size; + __u8 rsvd_pos; + __u16 vesapm_seg; + __u16 vesapm_off; + __u16 pages; + __u16 vesa_attributes; + __u32 capabilities; + __u32 ext_lfb_base; + __u8 _reserved[2]; +} __attribute__((packed)); + +struct apm_bios_info { + __u16 version; + __u16 cseg; + __u32 offset; + __u16 cseg_16; + __u16 dseg; + __u16 flags; + __u16 cseg_len; + __u16 cseg_16_len; + __u16 dseg_len; +}; + +struct ist_info { + __u32 signature; + __u32 command; + __u32 event; + __u32 perf_level; +}; + +struct sys_desc_table { + __u16 length; + __u8 table[14]; +}; + +struct olpc_ofw_header { + __u32 ofw_magic; + __u32 ofw_version; + __u32 cif_handler; + __u32 irq_desc_table; +}; + +struct edid_info { + unsigned char dummy[128]; +}; + +struct efi_info { + __u32 efi_loader_signature; + __u32 efi_systab; + __u32 efi_memdesc_size; + __u32 efi_memdesc_version; + __u32 efi_memmap; + __u32 efi_memmap_size; + __u32 efi_systab_hi; + __u32 efi_memmap_hi; +}; + +struct setup_header { + __u8 setup_sects; + __u16 root_flags; + __u32 syssize; + __u16 ram_size; + __u16 vid_mode; + __u16 root_dev; + __u16 boot_flag; + __u16 jump; + __u32 header; + __u16 version; + __u32 realmode_swtch; + __u16 start_sys_seg; + __u16 kernel_version; + __u8 type_of_loader; + __u8 loadflags; + __u16 setup_move_size; + __u32 code32_start; + __u32 ramdisk_image; + __u32 ramdisk_size; + __u32 bootsect_kludge; + __u16 heap_end_ptr; + __u8 ext_loader_ver; + __u8 ext_loader_type; + __u32 cmd_line_ptr; + __u32 initrd_addr_max; + __u32 kernel_alignment; + __u8 relocatable_kernel; + __u8 min_alignment; + __u16 xloadflags; + __u32 cmdline_size; + __u32 hardware_subarch; + __u64 hardware_subarch_data; + __u32 payload_offset; + __u32 payload_length; + __u64 setup_data; + __u64 pref_address; + __u32 init_size; + __u32 handover_offset; + __u32 kernel_info_offset; +} __attribute__((packed)); + +struct boot_e820_entry { + __u64 addr; + __u64 size; + __u32 type; +} __attribute__((packed)); + +struct edd_device_params { + __u16 length; + __u16 info_flags; + __u32 num_default_cylinders; + __u32 num_default_heads; + __u32 sectors_per_track; + __u64 number_of_sectors; + __u16 bytes_per_sector; + __u32 dpte_ptr; + __u16 key; + __u8 device_path_info_length; + __u8 reserved2; + __u16 reserved3; + __u8 host_bus_type[4]; + __u8 interface_type[8]; + union { + struct { + __u16 base_address; + __u16 reserved1; + __u32 reserved2; + } isa; + struct { + __u8 bus; + __u8 slot; + __u8 function; + __u8 channel; + __u32 reserved; + } pci; + struct { + __u64 reserved; + } ibnd; + struct { + __u64 reserved; + } xprs; + struct { + __u64 reserved; + } htpt; + struct { + __u64 reserved; + } unknown; + } interface_path; + union { + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } ata; + struct { + __u8 device; + __u8 lun; + __u8 reserved1; + __u8 reserved2; + __u32 reserved3; + __u64 reserved4; + } atapi; + struct { + __u16 id; + __u64 lun; + __u16 reserved1; + __u32 reserved2; + } __attribute__((packed)) scsi; + struct { + __u64 serial_number; + __u64 reserved; + } usb; + struct { + __u64 eui; + __u64 reserved; + } i1394; + struct { + __u64 wwid; + __u64 lun; + } fibre; + struct { + __u64 identity_tag; + __u64 reserved; + } i2o; + struct { + __u32 array_number; + __u32 reserved1; + __u64 reserved2; + } raid; + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 
reserved4; + } sata; + struct { + __u64 reserved1; + __u64 reserved2; + } unknown; + } device_path; + __u8 reserved4; + __u8 checksum; +} __attribute__((packed)); + +struct edd_info { + __u8 device; + __u8 version; + __u16 interface_support; + __u16 legacy_max_cylinder; + __u8 legacy_max_head; + __u8 legacy_sectors_per_track; + struct edd_device_params params; +}; + +struct boot_params { + struct screen_info screen_info; + struct apm_bios_info apm_bios_info; + __u8 _pad2[4]; + __u64 tboot_addr; + struct ist_info ist_info; + __u64 acpi_rsdp_addr; + __u8 _pad3[8]; + __u8 hd0_info[16]; + __u8 hd1_info[16]; + struct sys_desc_table sys_desc_table; + struct olpc_ofw_header olpc_ofw_header; + __u32 ext_ramdisk_image; + __u32 ext_ramdisk_size; + __u32 ext_cmd_line_ptr; + __u8 _pad4[112]; + __u32 cc_blob_address; + struct edid_info edid_info; + struct efi_info efi_info; + __u32 alt_mem_k; + __u32 scratch; + __u8 e820_entries; + __u8 eddbuf_entries; + __u8 edd_mbr_sig_buf_entries; + __u8 kbd_status; + __u8 secure_boot; + __u8 _pad5[2]; + __u8 sentinel; + __u8 _pad6[1]; + struct setup_header hdr; + __u8 _pad7[36]; + __u32 edd_mbr_sig_buffer[16]; + struct boot_e820_entry e820_table[128]; + __u8 _pad8[48]; + struct edd_info eddbuf[6]; + __u8 _pad9[276]; +}; + +enum x86_hardware_subarch { + X86_SUBARCH_PC = 0, + X86_SUBARCH_LGUEST = 1, + X86_SUBARCH_XEN = 2, + X86_SUBARCH_INTEL_MID = 3, + X86_SUBARCH_CE4100 = 4, + X86_NR_SUBARCHS = 5, +}; + +enum { + GATE_INTERRUPT = 14, + GATE_TRAP = 15, + GATE_CALL = 12, + GATE_TASK = 5, +}; + +struct boot_params_to_save { + unsigned int start; + unsigned int len; +}; + +struct idt_data { + unsigned int vector; + unsigned int segment; + struct idt_bits bits; + const void *addr; +}; + +enum x86_legacy_i8042_state { + X86_LEGACY_I8042_PLATFORM_ABSENT = 0, + X86_LEGACY_I8042_FIRMWARE_ABSENT = 1, + X86_LEGACY_I8042_EXPECTED_PRESENT = 2, +}; + +enum show_regs_mode { + SHOW_REGS_SHORT = 0, + SHOW_REGS_USER = 1, + SHOW_REGS_ALL = 2, +}; + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +enum which_selector { + FS = 0, + GS = 1, +}; + +enum xfeature { + XFEATURE_FP = 0, + XFEATURE_SSE = 1, + XFEATURE_YMM = 2, + XFEATURE_BNDREGS = 3, + XFEATURE_BNDCSR = 4, + XFEATURE_OPMASK = 5, + XFEATURE_ZMM_Hi256 = 6, + XFEATURE_Hi16_ZMM = 7, + XFEATURE_PT_UNIMPLEMENTED_SO_FAR = 8, + XFEATURE_PKRU = 9, + XFEATURE_PASID = 10, + XFEATURE_CET_USER = 11, + XFEATURE_CET_KERNEL_UNUSED = 12, + XFEATURE_RSRVD_COMP_13 = 13, + XFEATURE_RSRVD_COMP_14 = 14, + XFEATURE_LBR = 15, + XFEATURE_RSRVD_COMP_16 = 16, + XFEATURE_XTILE_CFG = 17, + XFEATURE_XTILE_DATA = 18, + XFEATURE_MAX = 19, +}; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +struct kernel_vm86_regs { + struct pt_regs pt; + unsigned short es; + unsigned short __esh; + unsigned short ds; + unsigned short __dsh; + unsigned short fs; + unsigned short __fsh; + unsigned short gs; + unsigned short __gsh; +}; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +struct __large_struct { + unsigned long buf[100]; +}; + +struct sigaltstack { + void __attribute__((btf_type_tag("user"))) * ss_sp; + int ss_flags; + __kernel_size_t ss_size; +}; + +typedef struct sigaltstack 
stack_t; + +struct sigcontext_64 { + __u64 r8; + __u64 r9; + __u64 r10; + __u64 r11; + __u64 r12; + __u64 r13; + __u64 r14; + __u64 r15; + __u64 di; + __u64 si; + __u64 bp; + __u64 bx; + __u64 dx; + __u64 ax; + __u64 cx; + __u64 sp; + __u64 ip; + __u64 flags; + __u16 cs; + __u16 gs; + __u16 fs; + __u16 ss; + __u64 err; + __u64 trapno; + __u64 oldmask; + __u64 cr2; + __u64 fpstate; + __u64 reserved1[8]; +}; + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext_64 uc_mcontext; + sigset_t uc_sigmask; +}; + +struct siginfo { + union { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; + int _si_pad[32]; + }; +}; + +struct rt_sigframe { + char __attribute__((btf_type_tag("user"))) * pretcode; + struct ucontext uc; + struct siginfo info; +}; + +typedef struct siginfo siginfo_t; + +enum die_val { + DIE_OOPS = 1, + DIE_INT3 = 2, + DIE_DEBUG = 3, + DIE_PANIC = 4, + DIE_NMI = 5, + DIE_DIE = 6, + DIE_KERNELDEBUG = 7, + DIE_TRAP = 8, + DIE_GPF = 9, + DIE_CALL = 10, + DIE_PAGE_FAULT = 11, + DIE_NMIUNKNOWN = 12, +}; + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +enum kernel_gp_hint { + GP_NO_HINT = 0, + GP_NON_CANONICAL = 1, + GP_CANONICAL = 2, +}; + +enum insn_mode { + INSN_MODE_32 = 0, + INSN_MODE_64 = 1, + INSN_MODE_KERN = 2, + INSN_NUM_MODES = 3, +}; + +typedef u8 kprobe_opcode_t; + +struct kprobe; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, unsigned long); + +struct arch_specific_insn { + kprobe_opcode_t *insn; + unsigned int boostable : 1; + unsigned char size; + union { + unsigned char opcode; + struct { + unsigned char type; + } jcc; + struct { + unsigned char type; + unsigned char asize; + } loop; + struct { + unsigned char reg; + } indirect; + }; + s32 rel32; + void (*emulate_op)(struct kprobe *, struct pt_regs *); + int tp_len; +}; + +struct kprobe { + struct hlist_node hlist; + struct list_head list; + unsigned long nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_opcode_t opcode; + struct arch_specific_insn ainsn; + u32 flags; +}; + +typedef void (*btf_trace_local_timer_entry)(void *, int); + +typedef void (*btf_trace_local_timer_exit)(void *, int); + +typedef void (*btf_trace_spurious_apic_entry)(void *, int); + +typedef void (*btf_trace_spurious_apic_exit)(void *, int); + +typedef void (*btf_trace_error_apic_entry)(void *, int); + +typedef void (*btf_trace_error_apic_exit)(void *, int); + +typedef void (*btf_trace_x86_platform_ipi_entry)(void *, int); + +typedef void (*btf_trace_x86_platform_ipi_exit)(void *, int); + +typedef void (*btf_trace_irq_work_entry)(void *, int); + +typedef void (*btf_trace_irq_work_exit)(void *, int); + +typedef void (*btf_trace_reschedule_entry)(void *, int); + +typedef void (*btf_trace_reschedule_exit)(void *, int); + +typedef void (*btf_trace_call_function_entry)(void *, int); + +typedef void (*btf_trace_call_function_exit)(void *, int); + +typedef void (*btf_trace_call_function_single_entry)(void *, int); + +typedef void (*btf_trace_call_function_single_exit)(void *, int); + +typedef void (*btf_trace_threshold_apic_entry)(void *, int); + +typedef void (*btf_trace_threshold_apic_exit)(void *, int); + +typedef void (*btf_trace_deferred_error_apic_entry)(void *, int); + +typedef 
void (*btf_trace_deferred_error_apic_exit)(void *, int); + +typedef void (*btf_trace_vector_config)(void *, unsigned int, unsigned int, unsigned int, + unsigned int); + +typedef void (*btf_trace_vector_update)(void *, unsigned int, unsigned int, unsigned int, + unsigned int, unsigned int); + +typedef void (*btf_trace_vector_clear)(void *, unsigned int, unsigned int, unsigned int, + unsigned int, unsigned int); + +typedef void (*btf_trace_vector_reserve_managed)(void *, unsigned int, int); + +typedef void (*btf_trace_vector_reserve)(void *, unsigned int, int); + +typedef void (*btf_trace_vector_alloc)(void *, unsigned int, unsigned int, bool, int); + +typedef void (*btf_trace_vector_alloc_managed)(void *, unsigned int, unsigned int, int); + +typedef void (*btf_trace_vector_activate)(void *, unsigned int, bool, bool, bool); + +typedef void (*btf_trace_vector_deactivate)(void *, unsigned int, bool, bool, bool); + +typedef void (*btf_trace_vector_teardown)(void *, unsigned int, bool, bool); + +typedef void (*btf_trace_vector_setup)(void *, unsigned int, bool, int); + +typedef void (*btf_trace_vector_free_moved)(void *, unsigned int, unsigned int, + unsigned int, bool); + +typedef struct { + unsigned int __nmi_count; + unsigned int apic_timer_irqs; + unsigned int irq_spurious_count; + unsigned int icr_read_retry_count; + unsigned int kvm_posted_intr_ipis; + unsigned int kvm_posted_intr_wakeup_ipis; + unsigned int kvm_posted_intr_nested_ipis; + unsigned int x86_platform_ipis; + unsigned int apic_perf_irqs; + unsigned int apic_irq_work_irqs; + unsigned int irq_resched_count; + unsigned int irq_call_count; + unsigned int irq_tlb_count; + unsigned int irq_threshold_count; + unsigned int irq_deferred_error_count; + long : 0; +} irq_cpustat_t; + +struct trace_event_raw_x86_irq_vector { + struct trace_entry ent; + int vector; + char __data[0]; +}; + +struct trace_event_raw_vector_config { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + unsigned int cpu; + unsigned int apicdest; + char __data[0]; +}; + +struct trace_event_raw_vector_mod { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + unsigned int cpu; + unsigned int prev_vector; + unsigned int prev_cpu; + char __data[0]; +}; + +struct trace_event_raw_vector_reserve { + struct trace_entry ent; + unsigned int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_alloc { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + bool reserved; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_alloc_managed { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_activate { + struct trace_entry ent; + unsigned int irq; + bool is_managed; + bool can_reserve; + bool reserve; + char __data[0]; +}; + +struct trace_event_raw_vector_teardown { + struct trace_entry ent; + unsigned int irq; + bool is_managed; + bool has_reserved; + char __data[0]; +}; + +struct trace_event_raw_vector_setup { + struct trace_entry ent; + unsigned int irq; + bool is_legacy; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_free_moved { + struct trace_entry ent; + unsigned int irq; + unsigned int cpu; + unsigned int vector; + bool is_managed; + char __data[0]; +}; + +struct trace_event_data_offsets_x86_irq_vector {}; + +struct trace_event_data_offsets_vector_config {}; + +struct trace_event_data_offsets_vector_mod {}; + +struct trace_event_data_offsets_vector_reserve {}; + +struct 
trace_event_data_offsets_vector_alloc {}; + +struct trace_event_data_offsets_vector_alloc_managed {}; + +struct trace_event_data_offsets_vector_activate {}; + +struct trace_event_data_offsets_vector_teardown {}; + +struct trace_event_data_offsets_vector_setup {}; + +struct trace_event_data_offsets_vector_free_moved {}; + +struct irq_stack { + char stack[16384]; +}; + +struct estack_pages { + u32 offs; + u16 size; + u16 type; +}; + +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER = 1, + CSID_MAX = 2, +}; + +struct clocksource { + u64 (*read)(struct clocksource *); + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; + u32 uncertainty_margin; + u64 max_cycles; + const char *name; + struct list_head list; + int rating; + enum clocksource_ids id; + enum vdso_clock_mode vdso_clock_mode; + unsigned long flags; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct list_head wd_list; + u64 cs_last; + u64 wd_last; + struct module *owner; +}; + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_DEVICE_TREE = 10, + LOCKDOWN_PCMCIA_CIS = 11, + LOCKDOWN_TIOCSSERIAL = 12, + LOCKDOWN_MODULE_PARAMETERS = 13, + LOCKDOWN_MMIOTRACE = 14, + LOCKDOWN_DEBUGFS = 15, + LOCKDOWN_XMON_WR = 16, + LOCKDOWN_BPF_WRITE_USER = 17, + LOCKDOWN_DBG_WRITE_KERNEL = 18, + LOCKDOWN_RTAS_ERROR_INJECTION = 19, + LOCKDOWN_INTEGRITY_MAX = 20, + LOCKDOWN_KCORE = 21, + LOCKDOWN_KPROBES = 22, + LOCKDOWN_BPF_READ_KERNEL = 23, + LOCKDOWN_DBG_READ_KERNEL = 24, + LOCKDOWN_PERF = 25, + LOCKDOWN_TRACEFS = 26, + LOCKDOWN_XMON_RW = 27, + LOCKDOWN_XFRM_SECRET = 28, + LOCKDOWN_CONFIDENTIALITY_MAX = 29, +}; + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +typedef void (*btf_trace_nmi_handler)(void *, void *, s64, int); + +struct nmi_stats { + unsigned int normal; + unsigned int unknown; + unsigned int external; + unsigned int swallow; + unsigned long recv_jiffies; + unsigned long idt_seq; + unsigned long idt_nmi_seq; + unsigned long idt_ignored; + atomic_long_t idt_calls; + unsigned long idt_seq_snap; + unsigned long idt_nmi_seq_snap; + unsigned long idt_ignored_snap; + long idt_calls_snap; +}; + +enum nmi_states { + NMI_NOT_RUNNING = 0, + NMI_EXECUTING = 1, + NMI_LATCHED = 2, +}; + +struct nmi_desc { + raw_spinlock_t lock; + struct list_head head; +}; + +struct trace_event_raw_nmi_handler { + struct trace_entry ent; + void *handler; + s64 delta_ns; + int handled; + char __data[0]; +}; + +struct trace_event_data_offsets_nmi_handler {}; + +typedef struct ldttss_desc ldt_desc; + +struct encoded_page; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct encoded_page *encoded_pages[0]; +}; + +struct mmu_gather { + struct mm_struct *mm; + unsigned long start; + unsigned long end; + unsigned int fullmm : 1; + unsigned int need_flush_all : 1; + unsigned int freed_tables : 1; + unsigned int delayed_rmap : 1; + unsigned int 
cleared_ptes : 1; + unsigned int cleared_pmds : 1; + unsigned int cleared_puds : 1; + unsigned int cleared_p4ds : 1; + unsigned int vma_exec : 1; + unsigned int vma_huge : 1; + unsigned int vma_pfn : 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; +}; + +struct user_desc { + unsigned int entry_number; + unsigned int base_addr; + unsigned int limit; + unsigned int seg_32bit : 1; + unsigned int contents : 2; + unsigned int read_exec_only : 1; + unsigned int limit_in_pages : 1; + unsigned int seg_not_present : 1; + unsigned int useable : 1; + unsigned int lm : 1; +}; + +enum efi_secureboot_mode { + efi_secureboot_mode_unset = 0, + efi_secureboot_mode_unknown = 1, + efi_secureboot_mode_disabled = 2, + efi_secureboot_mode_enabled = 3, +}; + +enum e820_type { + E820_TYPE_RAM = 1, + E820_TYPE_RESERVED = 2, + E820_TYPE_ACPI = 3, + E820_TYPE_NVS = 4, + E820_TYPE_UNUSABLE = 5, + E820_TYPE_PMEM = 7, + E820_TYPE_PRAM = 12, + E820_TYPE_SOFT_RESERVED = 4026531839, + E820_TYPE_RESERVED_KERN = 128, +}; + +struct setup_indirect { + __u32 type; + __u32 reserved; + __u64 len; + __u64 addr; +}; + +struct e820_entry { + u64 addr; + u64 size; + enum e820_type type; +} __attribute__((packed)); + +struct e820_table { + __u32 nr_entries; + struct e820_entry entries[320]; +}; + +struct setup_data { + __u64 next; + __u32 type; + __u32 len; + __u8 data[0]; +}; + +struct x86_init_resources { + void (*probe_roms)(); + void (*reserve_resources)(); + char *(*memory_setup)(); +}; + +struct x86_init_mpparse { + void (*setup_ioapic_ids)(); + void (*find_smp_config)(); + void (*get_smp_config)(unsigned int); +}; + +struct x86_init_irqs { + void (*pre_vector_init)(); + void (*intr_init)(); + void (*intr_mode_select)(); + void (*intr_mode_init)(); + struct irq_domain *(*create_pci_msi_domain)(); +}; + +struct x86_init_oem { + void (*arch_setup)(); + void (*banner)(); +}; + +struct x86_init_paging { + void (*pagetable_init)(); +}; + +struct x86_init_timers { + void (*setup_percpu_clockev)(); + void (*timer_init)(); + void (*wallclock_init)(); +}; + +struct x86_init_iommu { + int (*iommu_init)(); +}; + +struct x86_init_pci { + int (*arch_init)(); + int (*init)(); + void (*init_irq)(); + void (*fixup_irqs)(); +}; + +struct x86_hyper_init { + void (*init_platform)(); + void (*guest_late_init)(); + bool (*x2apic_available)(); + bool (*msi_ext_dest_id)(); + void (*init_mem_mapping)(); + void (*init_after_bootmem)(); +}; + +struct x86_init_acpi { + void (*set_root_pointer)(u64); + u64 (*get_root_pointer)(); + void (*reduced_hw_early_init)(); +}; + +struct x86_init_ops { + struct x86_init_resources resources; + struct x86_init_mpparse mpparse; + struct x86_init_irqs irqs; + struct x86_init_oem oem; + struct x86_init_paging paging; + struct x86_init_timers timers; + struct x86_init_iommu iommu; + struct x86_init_pci pci; + struct x86_hyper_init hyper; + struct x86_init_acpi acpi; +}; + +struct x86_cpuinit_ops { + void (*setup_percpu_clockev)(); + void (*early_percpu_clock_init)(); + void (*fixup_cpu_id)(struct cpuinfo_x86 *, int); + bool parallel_bringup; +}; + +struct x86_legacy_devices { + int pnpbios; +}; + +struct x86_legacy_features { + enum x86_legacy_i8042_state i8042; + int rtc; + int warm_reset; + int no_vga; + int reserve_bios_regions; + struct x86_legacy_devices devices; +}; + +struct ghcb; + +struct x86_hyper_runtime { + void (*pin_vcpu)(int); + void (*sev_es_hcall_prepare)(struct ghcb *, struct pt_regs *); + bool (*sev_es_hcall_finish)(struct 
ghcb *, struct pt_regs *); + bool (*is_private_mmio)(u64); +}; + +struct x86_guest { + bool (*enc_status_change_prepare)(unsigned long, int, bool); + bool (*enc_status_change_finish)(unsigned long, int, bool); + bool (*enc_tlb_flush_required)(bool); + bool (*enc_cache_flush_required)(); +}; + +struct x86_platform_ops { + unsigned long (*calibrate_cpu)(); + unsigned long (*calibrate_tsc)(); + void (*get_wallclock)(struct timespec64 *); + int (*set_wallclock)(const struct timespec64 *); + void (*iommu_shutdown)(); + bool (*is_untracked_pat_range)(u64, u64); + void (*nmi_init)(); + unsigned char (*get_nmi_reason)(); + void (*save_sched_clock_state)(); + void (*restore_sched_clock_state)(); + void (*apic_post_init)(); + struct x86_legacy_features legacy; + void (*set_legacy_features)(); + void (*realmode_reserve)(); + void (*realmode_init)(); + struct x86_hyper_runtime hyper; + struct x86_guest guest; +}; + +struct x86_apic_ops { + unsigned int (*io_apic_read)(unsigned int, unsigned int); + void (*restore)(); +}; + +struct legacy_pic { + int nr_legacy_irqs; + struct irq_chip *chip; + void (*mask)(unsigned int); + void (*unmask)(unsigned int); + void (*mask_all)(); + void (*restore_mask)(); + void (*init)(int); + int (*probe)(); + int (*irq_pending)(unsigned int); + void (*make_irq)(unsigned int); +}; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, + IRQ_NO_DEBUG = 2097152, +}; + +typedef struct irq_desc *vector_irq_t[256]; + +union text_poke_insn { + u8 text[5]; + struct { + u8 opcode; + s32 disp; + } __attribute__((packed)); +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +struct jump_label_patch { + const void *code; + int size; +}; + +enum psc_op { + SNP_PAGE_STATE_PRIVATE = 1, + SNP_PAGE_STATE_SHARED = 2, +}; + +enum page_cache_mode { + _PAGE_CACHE_MODE_WB = 0, + _PAGE_CACHE_MODE_WC = 1, + _PAGE_CACHE_MODE_UC_MINUS = 2, + _PAGE_CACHE_MODE_UC = 3, + _PAGE_CACHE_MODE_WT = 4, + _PAGE_CACHE_MODE_WP = 5, + _PAGE_CACHE_MODE_NUM = 8, +}; + +struct kstatfs { + long f_type; + long f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long f_namelen; + long f_frsize; + long f_flags; + long f_spare[4]; +}; + +struct stat64 { + unsigned long long st_dev; + unsigned char __pad0[4]; + unsigned int __st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned long long st_rdev; + unsigned char __pad3[4]; + long long st_size; + unsigned int st_blksize; + long long st_blocks; + unsigned int st_atime; + unsigned int st_atime_nsec; + unsigned int st_mtime; + unsigned int st_mtime_nsec; + unsigned int st_ctime; + unsigned int st_ctime_nsec; + unsigned long long st_ino; +} __attribute__((packed)); + +struct mmap_arg_struct32 { + unsigned int addr; + unsigned int len; + unsigned int prot; + unsigned int flags; + unsigned int fd; + unsigned int offset; +}; + +struct kernel_clone_args { + u64 flags; + int 
__attribute__((btf_type_tag("user"))) * pidfd; + int __attribute__((btf_type_tag("user"))) * child_tid; + int __attribute__((btf_type_tag("user"))) * parent_tid; + const char *name; + int exit_signal; + u32 kthread : 1; + u32 io_thread : 1; + u32 user_worker : 1; + u32 no_files : 1; + unsigned long stack; + unsigned long stack_size; + unsigned long tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + int idle; + int (*fn)(void *); + void *fn_arg; + struct cgroup *cgrp; + struct css_set *cset; +}; + +typedef u32 compat_sigset_word; + +typedef struct { + compat_sigset_word sig[2]; +} compat_sigset_t; + +typedef u32 compat_size_t; + +struct sigcontext_32 { + __u16 gs; + __u16 __gsh; + __u16 fs; + __u16 __fsh; + __u16 es; + __u16 __esh; + __u16 ds; + __u16 __dsh; + __u32 di; + __u32 si; + __u32 bp; + __u32 sp; + __u32 bx; + __u32 dx; + __u32 cx; + __u32 ax; + __u32 trapno; + __u32 err; + __u32 ip; + __u16 cs; + __u16 __csh; + __u32 flags; + __u32 sp_at_signal; + __u16 ss; + __u16 __ssh; + __u32 fpstate; + __u32 oldmask; + __u32 cr2; +}; + +struct _fpreg { + __u16 significand[4]; + __u16 exponent; +}; + +struct _fpxreg { + __u16 significand[4]; + __u16 exponent; + __u16 padding[3]; +}; + +struct _xmmreg { + __u32 element[4]; +}; + +struct _fpx_sw_bytes { + __u32 magic1; + __u32 extended_size; + __u64 xfeatures; + __u32 xstate_size; + __u32 padding[7]; +}; + +struct _fpstate_32 { + __u32 cw; + __u32 sw; + __u32 tag; + __u32 ipoff; + __u32 cssel; + __u32 dataoff; + __u32 datasel; + struct _fpreg _st[8]; + __u16 status; + __u16 magic; + __u32 _fxsr_env[6]; + __u32 mxcsr; + __u32 reserved; + struct _fpxreg _fxsr_st[8]; + struct _xmmreg _xmm[8]; + union { + __u32 padding1[44]; + __u32 padding[44]; + }; + union { + __u32 padding2[12]; + struct _fpx_sw_bytes sw_reserved; + }; +}; + +struct sigframe_ia32 { + u32 pretcode; + int sig; + struct sigcontext_32 sc; + struct _fpstate_32 fpstate_unused; + unsigned int extramask[1]; + char retcode[8]; +}; + +typedef s32 compat_pid_t; + +typedef u32 __compat_uid32_t; + +typedef s32 compat_timer_t; + +typedef s32 compat_int_t; + +union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +}; + +typedef union compat_sigval compat_sigval_t; + +typedef s32 compat_clock_t; + +typedef u32 compat_ulong_t; + +struct compat_siginfo { + int si_signo; + int si_errno; + int si_code; + union { + int _pad[29]; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + } _kill; + struct { + compat_timer_t _tid; + int _overrun; + compat_sigval_t _sigval; + } _timer; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + compat_sigval_t _sigval; + } _rt; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + int _status; + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + struct { + compat_uptr_t _addr; + union { + int _trapno; + short _addr_lsb; + struct { + char _dummy_bnd[4]; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + struct { + char _dummy_pkey[4]; + u32 _pkey; + } _addr_pkey; + struct { + compat_ulong_t _data; + u32 _type; + u32 _flags; + } _perf; + }; + } _sigfault; + struct { + compat_long_t _band; + int _fd; + } _sigpoll; + struct { + compat_uptr_t _call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; + } _sifields; +}; + +typedef struct compat_siginfo compat_siginfo_t; + +struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +}; + +typedef struct compat_sigaltstack compat_stack_t; + +struct ucontext_ia32 { + unsigned int uc_flags; + unsigned int uc_link; + 
compat_stack_t uc_stack; + struct sigcontext_32 uc_mcontext; + compat_sigset_t uc_sigmask; +}; + +struct rt_sigframe_ia32 { + u32 pretcode; + int sig; + u32 pinfo; + u32 puc; + compat_siginfo_t info; + struct ucontext_ia32 uc; + char retcode[8]; +}; + +enum align_flags { + ALIGN_VA_32 = 1, + ALIGN_VA_64 = 2, +}; + +struct vm_unmapped_area_info { + unsigned long flags; + unsigned long length; + unsigned long low_limit; + unsigned long high_limit; + unsigned long align_mask; + unsigned long align_offset; +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); +}; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +struct change_member { + struct e820_entry *entry; + unsigned long long addr; +}; + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + IORES_DESC_SOFT_RESERVED = 8, + IORES_DESC_CXL = 9, +}; + +typedef int (*cmp_func_t)(const void *, const void *); + +typedef void (*swap_func_t)(void *, void *, int); + +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; + +struct scatterlist { + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + unsigned int dma_length; + unsigned int dma_flags; +}; + +enum { + NONE_FORCE_HPET_RESUME = 0, + OLD_ICH_FORCE_HPET_RESUME = 1, + ICH_FORCE_HPET_RESUME = 2, + VT8237_FORCE_HPET_RESUME = 3, + NVIDIA_FORCE_HPET_RESUME = 4, + ATI_FORCE_HPET_RESUME = 5, +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +struct acpi_device; + +struct pci_sysdata { + int domain; + int node; + struct acpi_device *companion; + void *iommu; + void *fwnode; + struct pci_dev *nvme_remap_dev; +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +struct x86_cpu { + struct cpu cpu; +}; + +struct debugfs_blob_wrapper { + void *data; + unsigned long size; +}; + +struct setup_data_node { + u64 paddr; + u32 type; + u32 len; +}; + +struct text_poke_loc { + s32 rel_addr; + s32 disp; + u8 len; + u8 opcode; + const u8 text[5]; + u8 old; +}; + +struct bp_patching_desc { + struct text_poke_loc *vec; + int nr_entries; + atomic_t refs; +}; + +struct smp_alt_module { + struct module *mod; + char *name; + const s32 *locks; + const s32 *locks_end; + u8 *text; + u8 *text_end; + struct list_head next; +}; + +typedef u8 retpoline_thunk_t[32]; + +typedef struct { + struct mm_struct *mm; +} temp_mm_state_t; + +typedef void text_poke_f(void *, const void *, size_t); + +struct die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int 
signr; +}; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(unsigned long, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + unsigned long retries; + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + unsigned long min_delta_ticks; + unsigned long max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = 3, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = 7, +}; + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_3 = 3, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_5 = 5, + HW_BREAKPOINT_LEN_6 = 6, + HW_BREAKPOINT_LEN_7 = 7, + HW_BREAKPOINT_LEN_8 = 8, +}; + +struct tlb_context { + u64 ctx_id; + u64 tlb_gen; +}; + +struct tlb_state { + struct mm_struct *loaded_mm; + union { + struct mm_struct *last_user_mm; + unsigned long last_user_mm_spec; + }; + u16 loaded_mm_asid; + u16 next_asid; + bool invalidate_other; + u8 lam; + unsigned short user_pcid_flush_mask; + unsigned long cr4; + struct tlb_context ctxs[6]; +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +struct cyc2ns { + struct cyc2ns_data data[2]; + seqcount_latch_t seq; +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +enum { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_INACTIVE_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_INACTIVE = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, + WORK_NR_COLORS = 16, + WORK_CPU_UNBOUND = 64, + WORK_STRUCT_FLAG_BITS = 8, + WORK_OFFQ_FLAG_BASE = 4, + __WORK_OFFQ_CANCELING = 4, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_POOL_SHIFT = 5, + WORK_OFFQ_LEFT = 59, + WORK_OFFQ_POOL_BITS = 31, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 24, +}; + +typedef unsigned long long cycles_t; + +typedef unsigned int u_int; + +struct system_counterval_t { + u64 cycles; + struct clocksource *cs; +}; + +struct cpufreq_policy; + +struct cpufreq_freqs { + struct cpufreq_policy *policy; + unsigned int old; + unsigned int new; + u8 flags; +}; + +struct clk; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + 
unsigned int transition_latency; +}; + +struct plist_head { + struct list_head node_list; +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct cpufreq_governor; + +struct freq_qos_request; + +struct cpufreq_frequency_table; + +struct cpufreq_stats; + +struct thermal_cooling_device; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int cur; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + bool efficiencies_available; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + bool boost_enabled; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int (*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +struct muldiv { + u32 multiplier; + u32 divider; +}; + +struct freq_desc { + bool use_msr_plat; + struct muldiv muldiv[16]; + u32 freqs[16]; + u32 mask; +}; + +struct mfd_cell; + +struct pdev_archdata {}; + +struct platform_device_id; + +struct platform_device { + const char *name; + int id; + bool id_auto; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + const char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct pnp_protocol; + +struct pnp_card; + +struct pnp_driver; + +struct pnp_card_link; + +struct pnp_id; + +struct pnp_dev { + struct device dev; + u64 dma_mask; + 
unsigned int number; + int status; + struct list_head global_list; + struct list_head protocol_list; + struct list_head card_list; + struct list_head rdev_list; + struct pnp_protocol *protocol; + struct pnp_card *card; + struct pnp_driver *driver; + struct pnp_card_link *card_link; + struct pnp_id *id; + int active; + int capabilities; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head options; + char name[50]; + int flags; + struct proc_dir_entry *procent; + void *data; +}; + +struct pnp_protocol { + struct list_head protocol_list; + char *name; + int (*get)(struct pnp_dev *); + int (*set)(struct pnp_dev *); + int (*disable)(struct pnp_dev *); + bool (*can_wakeup)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + unsigned char number; + struct device dev; + struct list_head cards; + struct list_head devices; +}; + +struct pnp_card { + struct device dev; + unsigned char number; + struct list_head global_list; + struct list_head protocol_list; + struct list_head devices; + struct pnp_protocol *protocol; + struct pnp_id *id; + char name[50]; + unsigned char pnpver; + unsigned char productver; + unsigned int serial; + unsigned char checksum; + struct proc_dir_entry *procdir; +}; + +struct pnp_id { + char id[8]; + struct pnp_id *next; +}; + +struct pnp_device_id; + +struct pnp_driver { + const char *name; + const struct pnp_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_dev *, const struct pnp_device_id *); + void (*remove)(struct pnp_dev *); + void (*shutdown)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + struct device_driver driver; +}; + +struct pnp_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; +}; + +struct pnp_card_driver; + +struct pnp_card_link { + struct pnp_card *card; + struct pnp_card_driver *driver; + void *driver_data; + pm_message_t pm_state; +}; + +struct pnp_card_device_id; + +struct pnp_card_driver { + struct list_head global_list; + char *name; + const struct pnp_card_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *); + void (*remove)(struct pnp_card_link *); + int (*suspend)(struct pnp_card_link *, pm_message_t); + int (*resume)(struct pnp_card_link *); + struct pnp_driver link; +}; + +struct pnp_card_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; + struct { + __u8 id[8]; + } devs[8]; +}; + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +enum insn_type { + CALL = 0, + NOP = 1, + JMP = 2, + RET = 3, + JCC = 4, +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct 
pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +struct ssb_state { + struct ssb_state *shared_state; + raw_spinlock_t lock; + unsigned int disable_state; + unsigned long local_state; +}; + +enum idle_boot_override { + IDLE_NO_OVERRIDE = 0, + IDLE_HALT = 1, + IDLE_NOMWAIT = 2, + IDLE_POLL = 3, +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +enum tick_broadcast_mode { + TICK_BROADCAST_OFF = 0, + TICK_BROADCAST_ON = 1, + TICK_BROADCAST_FORCE = 2, +}; + +struct inactive_task_frame { + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long bx; + unsigned long bp; + unsigned long ret_addr; +}; + +struct fork_frame { + struct inactive_task_frame frame; + struct pt_regs regs; +}; + +typedef void (*btf_trace_x86_fpu_before_save)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_after_save)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_before_restore)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_after_restore)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_regs_activated)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_regs_deactivated)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_init_state)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_dropped)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_copy_src)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_copy_dst)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_xstate_check_failed)(void *, struct fpu *); + +struct fpu_state_config { + unsigned int max_size; + unsigned int default_size; + u64 max_features; + u64 default_features; + u64 legacy_features; +}; + +enum xstate_copy_mode { + XSTATE_COPY_FP = 0, + XSTATE_COPY_FX = 1, + XSTATE_COPY_XSAVE = 2, +}; + +struct trace_event_raw_x86_fpu { + struct trace_entry ent; + struct fpu *fpu; + bool load_fpu; + u64 xfeatures; + u64 xcomp_bv; + char __data[0]; +}; + +struct fpu_guest { + u64 xfeatures; + u64 perm; + u64 xfd_err; + unsigned int uabi_size; + struct fpstate *fpstate; +}; + +struct membuf { + void *p; + size_t left; +}; + +struct cet_user_state { + u64 user_cet; + u64 user_ssp; +}; + +struct trace_event_data_offsets_x86_fpu {}; + +struct user_regset; + +typedef int +user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, + unsigned int, unsigned int, const void *, + const void __attribute__((btf_type_tag("user"))) *); + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +struct user_i387_ia32_struct { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; +}; + +struct pkru_state { + u32 pkru; + u32 pad; +}; + +struct pt_regs_offset { + const char *name; + int offset; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +enum x86_regset_32 { + REGSET32_GENERAL = 0, + REGSET32_FP = 1, + 
REGSET32_XFP = 2, + REGSET32_XSTATE = 3, + REGSET32_TLS = 4, + REGSET32_IOPERM = 5, +}; + +enum x86_regset_64 { + REGSET64_GENERAL = 0, + REGSET64_FP = 1, + REGSET64_IOPERM = 2, + REGSET64_XSTATE = 3, + REGSET64_SSP = 4, +}; + +enum { + FOLL_WRITE = 1, + FOLL_GET = 2, + FOLL_DUMP = 4, + FOLL_FORCE = 8, + FOLL_NOWAIT = 16, + FOLL_NOFAULT = 32, + FOLL_HWPOISON = 64, + FOLL_ANON = 128, + FOLL_LONGTERM = 256, + FOLL_SPLIT_PMD = 512, + FOLL_PCI_P2PDMA = 1024, + FOLL_INTERRUPTIBLE = 2048, + FOLL_HONOR_NUMA_FAULT = 4096, +}; + +struct stack_frame_user { + const void __attribute__((btf_type_tag("user"))) * next_fp; + unsigned long ret_addr; +}; + +typedef bool (*stack_trace_consume_fn)(void *, unsigned long); + +struct _cache_table { + unsigned char descriptor; + char cache_type; + short size; +}; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +enum _cache_type { + CTYPE_NULL = 0, + CTYPE_DATA = 1, + CTYPE_INST = 2, + CTYPE_UNIFIED = 3, +}; + +union _cpuid4_leaf_eax { + struct { + enum _cache_type type : 5; + unsigned int level : 3; + unsigned int is_self_initializing : 1; + unsigned int is_fully_associative : 1; + unsigned int reserved : 4; + unsigned int num_threads_sharing : 12; + unsigned int num_cores_on_die : 6; + } split; + u32 full; +}; + +union _cpuid4_leaf_ebx { + struct { + unsigned int coherency_line_size : 12; + unsigned int physical_line_partition : 10; + unsigned int ways_of_associativity : 10; + } split; + u32 full; +}; + +union _cpuid4_leaf_ecx { + struct { + unsigned int number_of_sets : 32; + } split; + u32 full; +}; + +union l1_cache { + struct { + unsigned int line_size : 8; + unsigned int lines_per_tag : 8; + unsigned int assoc : 8; + unsigned int size_in_kb : 8; + }; + unsigned int val; +}; + +union l2_cache { + struct { + unsigned int line_size : 8; + unsigned int lines_per_tag : 4; + unsigned int assoc : 4; + unsigned int size_in_kb : 16; + }; + unsigned int val; +}; + +union l3_cache { + struct { + unsigned int line_size : 8; + unsigned int lines_per_tag : 4; + unsigned int assoc : 4; + unsigned int res : 2; + unsigned int size_encoded : 14; + }; + unsigned int val; +}; + +struct cacheinfo; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; + bool early_ci_levels; +}; + +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct amd_northbridge; + +struct _cpuid4_info_regs { + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + unsigned int id; + unsigned long size; + struct amd_northbridge *nb; +}; + +struct amd_l3_cache { + unsigned int indices; + u8 subcaches[4]; +}; + +struct threshold_bank; + +struct amd_northbridge { + struct pci_dev *root; + struct pci_dev *misc; + struct pci_dev *link; + struct amd_l3_cache l3_cache; + struct threshold_bank *bank4; +}; + +struct threshold_block; + +struct threshold_bank { + struct kobject *kobj; + struct threshold_block *blocks; + refcount_t cpus; + unsigned int shared; +}; + +struct threshold_block { + unsigned int block; + unsigned int bank; + unsigned int 
cpu; + u32 address; + u16 interrupt_enable; + bool interrupt_capable; + u16 threshold_limit; + struct kobject kobj; + struct list_head miscj; +}; + +typedef int (*cpu_stop_fn_t)(void *); + +struct cpuid_bit { + u16 feature; + u8 reg; + u8 bit; + u32 level; + u32 sub_leaf; +}; + +enum cpuid_regs_idx { + CPUID_EAX = 0, + CPUID_EBX = 1, + CPUID_ECX = 2, + CPUID_EDX = 3, +}; + +struct pcpu_hot { + union { + struct { + struct task_struct *current_task; + int preempt_count; + int cpu_number; + unsigned long top_of_stack; + void *hardirq_stack_ptr; + u16 softirq_pending; + bool hardirq_stack_inuse; + }; + u8 pad[64]; + }; +}; + +struct fixed_percpu_data { + char gs_base[40]; + unsigned long stack_canary; +}; + +struct cpu_dev { + const char *c_vendor; + const char *c_ident[2]; + void (*c_early_init)(struct cpuinfo_x86 *); + void (*c_bsp_init)(struct cpuinfo_x86 *); + void (*c_init)(struct cpuinfo_x86 *); + void (*c_identify)(struct cpuinfo_x86 *); + void (*c_detect_tlb)(struct cpuinfo_x86 *); + int c_x86_vendor; +}; + +struct cpuid_dependent_feature { + u32 feature; + u32 level; +}; + +struct ppin_info { + int feature; + int msr_ppin_ctl; + int msr_ppin; +}; + +enum cpuid_leafs { + CPUID_1_EDX = 0, + CPUID_8000_0001_EDX = 1, + CPUID_8086_0001_EDX = 2, + CPUID_LNX_1 = 3, + CPUID_1_ECX = 4, + CPUID_C000_0001_EDX = 5, + CPUID_8000_0001_ECX = 6, + CPUID_LNX_2 = 7, + CPUID_LNX_3 = 8, + CPUID_7_0_EBX = 9, + CPUID_D_1_EAX = 10, + CPUID_LNX_4 = 11, + CPUID_7_1_EAX = 12, + CPUID_8000_0008_EBX = 13, + CPUID_6_EAX = 14, + CPUID_8000_000A_EDX = 15, + CPUID_7_ECX = 16, + CPUID_8000_0007_EBX = 17, + CPUID_7_EDX = 18, + CPUID_8000_001F_EAX = 19, + CPUID_8000_0021_EAX = 20, +}; + +enum tlb_infos { + ENTRIES = 0, + NR_INFO = 1, +}; + +enum { + SAMPLES = 8, + MIN_CHANGE = 5, +}; + +struct x86_cpu_desc { + u8 x86_family; + u8 x86_vendor; + u8 x86_model; + u8 x86_stepping; + u32 x86_microcode_rev; +}; + +enum spectre_v2_mitigation { + SPECTRE_V2_NONE = 0, + SPECTRE_V2_RETPOLINE = 1, + SPECTRE_V2_LFENCE = 2, + SPECTRE_V2_EIBRS = 3, + SPECTRE_V2_EIBRS_RETPOLINE = 4, + SPECTRE_V2_EIBRS_LFENCE = 5, + SPECTRE_V2_IBRS = 6, +}; + +enum l1tf_mitigations { + L1TF_MITIGATION_OFF = 0, + L1TF_MITIGATION_FLUSH_NOWARN = 1, + L1TF_MITIGATION_FLUSH = 2, + L1TF_MITIGATION_FLUSH_NOSMT = 3, + L1TF_MITIGATION_FULL = 4, + L1TF_MITIGATION_FULL_FORCE = 5, +}; + +enum vmx_l1d_flush_state { + VMENTER_L1D_FLUSH_AUTO = 0, + VMENTER_L1D_FLUSH_NEVER = 1, + VMENTER_L1D_FLUSH_COND = 2, + VMENTER_L1D_FLUSH_ALWAYS = 3, + VMENTER_L1D_FLUSH_EPT_DISABLED = 4, + VMENTER_L1D_FLUSH_NOT_REQUIRED = 5, +}; + +enum srbds_mitigations { + SRBDS_MITIGATION_OFF = 0, + SRBDS_MITIGATION_UCODE_NEEDED = 1, + SRBDS_MITIGATION_FULL = 2, + SRBDS_MITIGATION_TSX_OFF = 3, + SRBDS_MITIGATION_HYPERVISOR = 4, +}; + +enum l1d_flush_mitigations { + L1D_FLUSH_OFF = 0, + L1D_FLUSH_ON = 1, +}; + +enum gds_mitigations { + GDS_MITIGATION_OFF = 0, + GDS_MITIGATION_UCODE_NEEDED = 1, + GDS_MITIGATION_FORCE = 2, + GDS_MITIGATION_FULL = 3, + GDS_MITIGATION_FULL_LOCKED = 4, + GDS_MITIGATION_HYPERVISOR = 5, +}; + +enum spectre_v1_mitigation { + SPECTRE_V1_MITIGATION_NONE = 0, + SPECTRE_V1_MITIGATION_AUTO = 1, +}; + +enum retbleed_mitigation_cmd { + RETBLEED_CMD_OFF = 0, + RETBLEED_CMD_AUTO = 1, + RETBLEED_CMD_UNRET = 2, + RETBLEED_CMD_IBPB = 3, + RETBLEED_CMD_STUFF = 4, +}; + +enum retbleed_mitigation { + RETBLEED_MITIGATION_NONE = 0, + RETBLEED_MITIGATION_UNRET = 1, + RETBLEED_MITIGATION_IBPB = 2, + RETBLEED_MITIGATION_IBRS = 3, + RETBLEED_MITIGATION_EIBRS = 4, + 
RETBLEED_MITIGATION_STUFF = 5, +}; + +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE = 0, + SPECTRE_V2_CMD_AUTO = 1, + SPECTRE_V2_CMD_FORCE = 2, + SPECTRE_V2_CMD_RETPOLINE = 3, + SPECTRE_V2_CMD_RETPOLINE_GENERIC = 4, + SPECTRE_V2_CMD_RETPOLINE_LFENCE = 5, + SPECTRE_V2_CMD_EIBRS = 6, + SPECTRE_V2_CMD_EIBRS_RETPOLINE = 7, + SPECTRE_V2_CMD_EIBRS_LFENCE = 8, + SPECTRE_V2_CMD_IBRS = 9, +}; + +enum spectre_v2_user_cmd { + SPECTRE_V2_USER_CMD_NONE = 0, + SPECTRE_V2_USER_CMD_AUTO = 1, + SPECTRE_V2_USER_CMD_FORCE = 2, + SPECTRE_V2_USER_CMD_PRCTL = 3, + SPECTRE_V2_USER_CMD_PRCTL_IBPB = 4, + SPECTRE_V2_USER_CMD_SECCOMP = 5, + SPECTRE_V2_USER_CMD_SECCOMP_IBPB = 6, +}; + +enum spectre_v2_user_mitigation { + SPECTRE_V2_USER_NONE = 0, + SPECTRE_V2_USER_STRICT = 1, + SPECTRE_V2_USER_STRICT_PREFERRED = 2, + SPECTRE_V2_USER_PRCTL = 3, + SPECTRE_V2_USER_SECCOMP = 4, +}; + +enum mds_mitigations { + MDS_MITIGATION_OFF = 0, + MDS_MITIGATION_FULL = 1, + MDS_MITIGATION_VMWERV = 2, +}; + +enum taa_mitigations { + TAA_MITIGATION_OFF = 0, + TAA_MITIGATION_UCODE_NEEDED = 1, + TAA_MITIGATION_VERW = 2, + TAA_MITIGATION_TSX_DISABLED = 3, +}; + +enum mmio_mitigations { + MMIO_MITIGATION_OFF = 0, + MMIO_MITIGATION_UCODE_NEEDED = 1, + MMIO_MITIGATION_VERW = 2, +}; + +enum ssb_mitigation_cmd { + SPEC_STORE_BYPASS_CMD_NONE = 0, + SPEC_STORE_BYPASS_CMD_AUTO = 1, + SPEC_STORE_BYPASS_CMD_ON = 2, + SPEC_STORE_BYPASS_CMD_PRCTL = 3, + SPEC_STORE_BYPASS_CMD_SECCOMP = 4, +}; + +enum ssb_mitigation { + SPEC_STORE_BYPASS_NONE = 0, + SPEC_STORE_BYPASS_DISABLE = 1, + SPEC_STORE_BYPASS_PRCTL = 2, + SPEC_STORE_BYPASS_SECCOMP = 3, +}; + +enum srso_mitigation_cmd { + SRSO_CMD_OFF = 0, + SRSO_CMD_MICROCODE = 1, + SRSO_CMD_SAFE_RET = 2, + SRSO_CMD_IBPB = 3, + SRSO_CMD_IBPB_ON_VMEXIT = 4, +}; + +enum srso_mitigation { + SRSO_MITIGATION_NONE = 0, + SRSO_MITIGATION_UCODE_NEEDED = 1, + SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED = 2, + SRSO_MITIGATION_MICROCODE = 3, + SRSO_MITIGATION_SAFE_RET = 4, + SRSO_MITIGATION_IBPB = 5, + SRSO_MITIGATION_IBPB_ON_VMEXIT = 6, +}; + +enum cpuhp_smt_control { + CPU_SMT_ENABLED = 0, + CPU_SMT_DISABLED = 1, + CPU_SMT_FORCE_DISABLED = 2, + CPU_SMT_NOT_SUPPORTED = 3, + CPU_SMT_NOT_IMPLEMENTED = 4, +}; + +enum x86_hypervisor_type { + X86_HYPER_NATIVE = 0, + X86_HYPER_VMWARE = 1, + X86_HYPER_MS_HYPERV = 2, + X86_HYPER_XEN_PV = 3, + X86_HYPER_XEN_HVM = 4, + X86_HYPER_KVM = 5, + X86_HYPER_JAILHOUSE = 6, + X86_HYPER_ACRN = 7, +}; + +struct aperfmperf { + seqcount_t seq; + unsigned long last_update; + u64 acnt; + u64 mcnt; + u64 aperf; + u64 mperf; +}; + +struct cpuid_dep { + unsigned int feature; + unsigned int depends; +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead : 1; +}; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +struct mce; + +typedef void 
(*btf_trace_mce_record)(void *, struct mce *); + +struct mce { + __u64 status; + __u64 misc; + __u64 addr; + __u64 mcgstatus; + __u64 ip; + __u64 tsc; + __u64 time; + __u8 cpuvendor; + __u8 inject_flags; + __u8 severity; + __u8 pad; + __u32 cpuid; + __u8 cs; + __u8 bank; + __u8 cpu; + __u8 finished; + __u32 extcpu; + __u32 socketid; + __u32 apicid; + __u64 mcgcap; + __u64 synd; + __u64 ipid; + __u64 ppin; + __u32 microcode; + __u64 kflags; +}; + +struct mca_config { + __u64 lmce_disabled : 1; + __u64 disabled : 1; + __u64 ser : 1; + __u64 recovery : 1; + __u64 bios_cmci_threshold : 1; + __u64 initialized : 1; + __u64 __reserved : 58; + bool dont_log_ce; + bool cmci_disabled; + bool ignore_ce; + bool print_all; + int monarch_timeout; + int panic_timeout; + u32 rip_msr; + s8 bootlog; +}; + +typedef unsigned long mce_banks_t[1]; + +struct mce_bank { + u64 ctl; + __u64 init : 1; + __u64 lsb_in_status : 1; + __u64 __reserved_1 : 62; +}; + +struct mce_vendor_flags { + __u64 overflow_recov : 1; + __u64 succor : 1; + __u64 smca : 1; + __u64 zen_ifu_quirk : 1; + __u64 amd_threshold : 1; + __u64 p5 : 1; + __u64 winchip : 1; + __u64 snb_ifu_quirk : 1; + __u64 skx_repmov_quirk : 1; + __u64 __reserved_0 : 55; +}; + +struct mce_bank_dev { + struct device_attribute attr; + char attrname[16]; + u8 bank; +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +enum mce_notifier_prios { + MCE_PRIO_LOWEST = 0, + MCE_PRIO_MCELOG = 1, + MCE_PRIO_EDAC = 2, + MCE_PRIO_NFIT = 3, + MCE_PRIO_EXTLOG = 4, + MCE_PRIO_UC = 5, + MCE_PRIO_EARLY = 6, + MCE_PRIO_CEC = 7, + MCE_PRIO_HIGHEST = 7, +}; + +enum mcp_flags { + MCP_TIMESTAMP = 1, + MCP_UC = 2, + MCP_DONTLOG = 4, + MCP_QUEUE_LOG = 8, +}; + +enum mca_msr { + MCA_CTL = 0, + MCA_STATUS = 1, + MCA_ADDR = 2, + MCA_MISC = 3, +}; + +enum severity_level { + MCE_NO_SEVERITY = 0, + MCE_DEFERRED_SEVERITY = 1, + MCE_UCNA_SEVERITY = 1, + MCE_KEEP_SEVERITY = 2, + MCE_SOME_SEVERITY = 3, + MCE_AO_SEVERITY = 4, + MCE_UC_SEVERITY = 5, + MCE_AR_SEVERITY = 6, + MCE_PANIC_SEVERITY = 7, +}; + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, + TWA_SIGNAL_NO_IPI = 3, +}; + +enum mf_flags { + MF_COUNT_INCREASED = 1, + MF_ACTION_REQUIRED = 2, + MF_MUST_KILL = 4, + MF_SOFT_OFFLINE = 8, + MF_UNPOISON = 16, + MF_SW_SIMULATED = 32, + MF_NO_RETRY = 64, +}; + +struct trace_event_raw_mce_record { + struct trace_entry ent; + u64 mcgcap; + u64 mcgstatus; + u64 status; + u64 addr; + u64 misc; + u64 synd; + u64 ipid; + u64 ip; + u64 tsc; + u64 walltime; + u32 cpu; + u32 cpuid; + u32 apicid; + u32 socketid; + u8 cs; + u8 bank; + u8 cpuvendor; + char __data[0]; +}; + +struct mce_evt_llist { + struct llist_node llnode; + struct mce mce; +}; + +struct trace_event_data_offsets_mce_record {}; + +struct severity { + u64 mask; + u64 result; + unsigned char sev; + unsigned char mcgmask; + unsigned char mcgres; + unsigned char ser; + unsigned char context; + unsigned char excp; + unsigned char covered; + unsigned char cpu_model; + unsigned char cpu_minstepping; + unsigned char bank_lo; + unsigned char bank_hi; + char *msg; +}; + +enum context { + IN_KERNEL = 1, + IN_USER = 2, + IN_KERNEL_RECOV = 3, +}; + +enum exception { + EXCP_CONTEXT = 1, + NO_EXCP = 2, +}; + +enum ser { + SER_REQUIRED = 1, + NO_SER = 2, +}; + +typedef unsigned long (*genpool_algo_t)(unsigned long *, unsigned long, unsigned long, + unsigned int, void *, struct gen_pool *, + unsigned long); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + 
genpool_algo_t algo; + void *data; + const char *name; +}; + +enum { + CMCI_STORM_NONE = 0, + CMCI_STORM_ACTIVE = 1, + CMCI_STORM_SUBSIDED = 2, +}; + +struct smca_hwid; + +struct smca_bank { + const struct smca_hwid *hwid; + u32 id; + u8 sysfs_id; +}; + +struct smca_hwid { + unsigned int bank_type; + u32 hwid_mcatype; +}; + +struct smca_bank_name { + const char *name; + const char *long_name; +}; + +struct threshold_attr { + struct attribute attr; + ssize_t (*show)(struct threshold_block *, char *); + ssize_t (*store)(struct threshold_block *, const char *, size_t); +}; + +enum smca_bank_types { + SMCA_LS = 0, + SMCA_LS_V2 = 1, + SMCA_IF = 2, + SMCA_L2_CACHE = 3, + SMCA_DE = 4, + SMCA_RESERVED = 5, + SMCA_EX = 6, + SMCA_FP = 7, + SMCA_L3_CACHE = 8, + SMCA_CS = 9, + SMCA_CS_V2 = 10, + SMCA_PIE = 11, + SMCA_UMC = 12, + SMCA_UMC_V2 = 13, + SMCA_PB = 14, + SMCA_PSP = 15, + SMCA_PSP_V2 = 16, + SMCA_SMU = 17, + SMCA_SMU_V2 = 18, + SMCA_MP5 = 19, + SMCA_MPDMA = 20, + SMCA_NBIO = 21, + SMCA_PCIE = 22, + SMCA_PCIE_V2 = 23, + SMCA_XGMI_PCS = 24, + SMCA_NBIF = 25, + SMCA_SHUB = 26, + SMCA_SATA = 27, + SMCA_USB = 28, + SMCA_GMI_PCS = 29, + SMCA_XGMI_PHY = 30, + SMCA_WAFL_PHY = 31, + SMCA_GMI_PHY = 32, + N_SMCA_BANK_TYPES = 33, +}; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +struct thresh_restart { + struct threshold_block *b; + int reset; + int set_lvt_off; + int lvt_off; + u16 old_limit; +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +enum { + GHES_SEV_NO = 0, + GHES_SEV_CORRECTED = 1, + GHES_SEV_RECOVERABLE = 2, + GHES_SEV_PANIC = 3, +}; + +enum { + CPER_SEV_RECOVERABLE = 0, + CPER_SEV_FATAL = 1, + CPER_SEV_CORRECTED = 2, + CPER_SEV_INFORMATIONAL = 3, +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +struct cper_record_header { + char signature[4]; + u16 revision; + u32 signature_end; + u16 section_count; + u32 error_severity; + u32 validation_bits; + u32 record_length; + u64 timestamp; + guid_t platform_id; + guid_t partition_id; + guid_t creator_id; + guid_t notification_type; + u64 record_id; + u32 flags; + u64 persistence_information; + u8 reserved[12]; +} __attribute__((packed)); + +struct cper_sec_mem_err { + u64 validation_bits; + u64 error_status; + u64 physical_addr; + u64 physical_addr_mask; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_pos; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u8 error_type; + u8 extended; + u16 rank; + u16 mem_array_handle; + u16 mem_dev_handle; +}; + +struct cper_ia_proc_ctx { + u16 reg_ctx_type; + u16 reg_arr_size; + u32 msr_addr; + u64 mm_reg_addr; +}; + +struct cper_section_descriptor { + u32 section_offset; + u32 section_length; + u16 revision; + u8 validation_bits; + u8 reserved; + u32 flags; + guid_t section_type; + guid_t fru_id; + u32 section_severity; + u8 fru_text[20]; +}; + +struct cper_mce_record { + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + struct mce mce; +}; + +typedef __u8 mtrr_type; + +struct mtrr_ops { + u32 var_regs; + void (*set)(unsigned int, unsigned long, unsigned long, mtrr_type); + void (*get)(unsigned int, unsigned long *, unsigned long *, mtrr_type *); + int (*get_free_region)(unsigned long, unsigned long, int); + int (*validate_add_page)(unsigned long, unsigned long, unsigned int); + int (*have_wrcomb)(); +}; + +struct set_mtrr_data { + unsigned long smp_base; + unsigned 
long smp_size; + unsigned int smp_reg; + mtrr_type smp_type; +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char __attribute__((btf_type_tag("user"))) *, + size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, + const char __attribute__((btf_type_tag("user"))) *, size_t, + loff_t *); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long (*proc_ioctl)(struct file *, unsigned int, unsigned long); + long (*proc_compat_ioctl)(struct file *, unsigned int, unsigned long); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +}; + +typedef u32 compat_uint_t; + +struct mtrr_sentry32 { + compat_ulong_t base; + compat_uint_t size; + compat_uint_t type; +}; + +struct mtrr_gentry32 { + compat_ulong_t regnum; + compat_uint_t base; + compat_uint_t size; + compat_uint_t type; +}; + +struct mtrr_sentry { + __u64 base; + __u32 size; + __u32 type; +}; + +struct mtrr_gentry { + __u64 base; + __u32 size; + __u32 regnum; + __u32 type; + __u32 _pad; +}; + +struct cache_map { + u64 start; + u64 end; + u64 flags; + u64 type : 8; + u64 fixed : 1; +}; + +struct mtrr_var_range { + __u32 base_lo; + __u32 base_hi; + __u32 mask_lo; + __u32 mask_hi; +}; + +struct mtrr_state_type { + struct mtrr_var_range var_ranges[256]; + mtrr_type fixed_ranges[88]; + unsigned char enabled; + bool have_fixed; + mtrr_type def_type; +}; + +struct fixed_range_block { + int base_msr; + int ranges; +}; + +struct var_mtrr_range_state { + unsigned long base_pfn; + unsigned long size_pfn; + mtrr_type type; +}; + +struct mtrr_cleanup_result { + unsigned long gran_sizek; + unsigned long chunk_sizek; + unsigned long lose_cover_sizek; + unsigned int num_reg; + int bad; +}; + +struct var_mtrr_state { + unsigned long range_startk; + unsigned long range_sizek; + unsigned long chunk_sizek; + unsigned long gran_sizek; + unsigned int reg; +}; + +enum ucode_state { + UCODE_OK = 0, + UCODE_NEW = 1, + UCODE_NEW_SAFE = 2, + UCODE_UPDATED = 3, + UCODE_NFOUND = 4, + UCODE_ERROR = 5, + UCODE_TIMEOUT = 6, + UCODE_OFFLINE = 7, +}; + +struct cpu_signature; + +struct microcode_ops { + enum ucode_state (*request_microcode_fw)(int, struct device *); + void (*microcode_fini_cpu)(int); + enum ucode_state (*apply_microcode)(int); + int (*collect_cpu_info)(int, struct cpu_signature *); + void (*finalize_late_load)(int); + unsigned int nmi_safe : 1; + unsigned int use_nmi : 1; +}; + +struct cpu_signature { + unsigned int sig; + unsigned int pf; + unsigned int rev; +}; + +struct ucode_cpu_info { + struct cpu_signature cpu_sig; + void *mc; +}; + +struct early_load_data { + u32 old_rev; + u32 new_rev; +}; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +struct property_entry; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; + unsigned int num_res; + const void *data; + size_t size_data; + u64 dma_mask; + const struct property_entry *properties; +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct 
property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[1]; + } value; + }; +}; + +struct equiv_cpu_entry; + +struct equiv_cpu_table { + unsigned int num_entries; + struct equiv_cpu_entry *entry; +}; + +struct equiv_cpu_entry { + u32 installed_cpu; + u32 fixed_errata_mask; + u32 fixed_errata_compare; + u16 equiv_cpu; + u16 res; +}; + +struct microcode_header_amd { + u32 data_code; + u32 patch_id; + u16 mc_patch_data_id; + u8 mc_patch_data_len; + u8 init_flag; + u32 mc_patch_data_checksum; + u32 nb_dev_id; + u32 sb_dev_id; + u16 processor_rev_id; + u8 nb_rev_id; + u8 sb_rev_id; + u8 bios_api_rev; + u8 reserved1[3]; + u32 match_reg[8]; +}; + +struct microcode_amd { + struct microcode_header_amd hdr; + unsigned int mpb[0]; +}; + +struct ucode_patch { + struct list_head plist; + void *data; + unsigned int size; + u32 patch_id; + u16 equiv_cpu; +}; + +struct cont_desc { + struct microcode_amd *mc; + u32 cpuid_1_eax; + u32 psize; + u8 *data; + size_t size; +}; + +struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC = 1, + ACPI_IRQ_MODEL_IOSAPIC = 2, + ACPI_IRQ_MODEL_PLATFORM = 3, + ACPI_IRQ_MODEL_GIC = 4, + ACPI_IRQ_MODEL_LPIC = 5, + ACPI_IRQ_MODEL_COUNT = 6, +}; + +struct serial_icounter_struct { + int cts; + int dsr; + int rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + unsigned short close_delay; + char io_type; + char reserved_char[1]; + int hub6; + unsigned short closing_wait; + unsigned short closing_wait2; + unsigned char *iomem_base; + unsigned short iomem_reg_shift; + unsigned int port_high; + unsigned long iomap_base; +}; + +struct acpi_madt_multiproc_wakeup_mailbox { + u16 command; + u16 reserved; + u32 apic_id; + u64 wakeup_vector; + u8 reserved_os[2032]; + u8 reserved_firmware[2048]; +}; + +enum ioapic_domain_type { + IOAPIC_DOMAIN_INVALID = 0, + IOAPIC_DOMAIN_LEGACY = 1, + IOAPIC_DOMAIN_STRICT = 2, + IOAPIC_DOMAIN_DYNAMIC = 3, +}; + +enum acpi_madt_type { + ACPI_MADT_TYPE_LOCAL_APIC = 0, + ACPI_MADT_TYPE_IO_APIC = 1, + ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, + ACPI_MADT_TYPE_NMI_SOURCE = 3, + ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, + ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, + ACPI_MADT_TYPE_IO_SAPIC = 6, + ACPI_MADT_TYPE_LOCAL_SAPIC = 7, + ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, + ACPI_MADT_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, + ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, + ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, + ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16, + ACPI_MADT_TYPE_CORE_PIC = 17, + ACPI_MADT_TYPE_LIO_PIC = 18, + ACPI_MADT_TYPE_HT_PIC = 19, + ACPI_MADT_TYPE_EIO_PIC = 20, + ACPI_MADT_TYPE_MSI_PIC = 21, + ACPI_MADT_TYPE_BIO_PIC = 22, + ACPI_MADT_TYPE_LPC_PIC = 23, + ACPI_MADT_TYPE_RINTC = 24, + ACPI_MADT_TYPE_IMSIC = 25, + ACPI_MADT_TYPE_APLIC = 26, + ACPI_MADT_TYPE_PLIC = 27, + ACPI_MADT_TYPE_RESERVED = 28, + ACPI_MADT_TYPE_OEM_RESERVED = 128, +}; + +enum mp_irq_source_types { + mp_INT = 0, + mp_NMI = 1, + mp_SMI = 2, + mp_ExtINT = 3, +}; 
+ +struct acpi_table_header { + char signature[4]; + u32 length; + u8 revision; + u8 checksum; + char oem_id[6]; + char oem_table_id[8]; + u32 oem_revision; + char asl_compiler_id[4]; + u32 asl_compiler_revision; +}; + +struct acpi_table_boot { + struct acpi_table_header header; + u8 cmos_index; + u8 reserved[3]; +}; + +struct acpi_table_madt { + struct acpi_table_header header; + u32 address; + u32 flags; +}; + +struct acpi_subtable_header { + u8 type; + u8 length; +}; + +struct acpi_madt_local_apic_override { + struct acpi_subtable_header header; + u16 reserved; + u64 address; +} __attribute__((packed)); + +typedef u8 acpi_adr_space_type; + +struct acpi_madt_local_sapic { + struct acpi_subtable_header header; + u8 processor_id; + u8 id; + u8 eid; + u8 reserved[3]; + u32 lapic_flags; + u32 uid; + char uid_string[0]; +}; + +struct acpi_madt_local_apic { + struct acpi_subtable_header header; + u8 processor_id; + u8 id; + u32 lapic_flags; +}; + +struct acpi_madt_local_x2apic { + struct acpi_subtable_header header; + u16 reserved; + u32 local_apic_id; + u32 lapic_flags; + u32 uid; +}; + +struct acpi_madt_local_x2apic_nmi { + struct acpi_subtable_header header; + u16 inti_flags; + u32 uid; + u8 lint; + u8 reserved[3]; +}; + +struct acpi_madt_local_apic_nmi { + struct acpi_subtable_header header; + u8 processor_id; + u16 inti_flags; + u8 lint; +} __attribute__((packed)); + +struct acpi_madt_io_apic { + struct acpi_subtable_header header; + u8 id; + u8 reserved; + u32 address; + u32 global_irq_base; +}; + +struct acpi_madt_interrupt_override { + struct acpi_subtable_header header; + u8 bus; + u8 source_irq; + u32 global_irq; + u16 inti_flags; +} __attribute__((packed)); + +struct acpi_madt_nmi_source { + struct acpi_subtable_header header; + u16 inti_flags; + u32 global_irq; +}; + +struct acpi_madt_multiproc_wakeup { + struct acpi_subtable_header header; + u16 mailbox_version; + u32 reserved; + u64 base_address; +}; + +struct acpi_generic_address { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct acpi_table_hpet { + struct acpi_table_header header; + u32 id; + struct acpi_generic_address address; + u8 sequence; + u16 minimum_tick; + u8 flags; +} __attribute__((packed)); + +typedef void *acpi_handle; + +struct ioapic_domain_cfg { + enum ioapic_domain_type type; + const struct irq_domain_ops *ops; + struct device_node *dev; +}; + +typedef u32 acpi_status; + +typedef char *acpi_string; + +union acpi_object; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +typedef u32 acpi_object_type; + +typedef u64 acpi_io_address; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + u64 value; + } integer; + struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *); + +union acpi_subtable_headers; + +typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *, const unsigned long); + +struct 
acpi_hmat_structure { + u16 type; + u16 reserved; + u32 length; +}; + +struct acpi_prmt_module_header { + u16 revision; + u16 length; +}; + +struct acpi_cedt_header { + u8 type; + u8 reserved; + u16 length; +}; + +union acpi_subtable_headers { + struct acpi_subtable_header common; + struct acpi_hmat_structure hmat; + struct acpi_prmt_module_header prmt; + struct acpi_cedt_header cedt; +}; + +struct mpc_intsrc { + unsigned char type; + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbus; + unsigned char srcbusirq; + unsigned char dstapic; + unsigned char dstirq; +}; + +typedef u32 phys_cpuid_t; + +typedef u64 acpi_physical_address; + +struct wakeup_header { + u16 video_mode; + u32 pmode_entry; + u16 pmode_cs; + u32 pmode_cr0; + u32 pmode_cr3; + u32 pmode_cr4; + u32 pmode_efer_low; + u32 pmode_efer_high; + u64 pmode_gdt; + u32 pmode_misc_en_low; + u32 pmode_misc_en_high; + u32 pmode_behavior; + u32 realmode_flags; + u32 real_magic; + u32 signature; +} __attribute__((packed)); + +struct acpi_hest_header { + u16 type; + u16 source_id; +}; + +struct acpi_hest_notify { + u8 type; + u8 length; + u16 config_write_enable; + u32 poll_interval; + u32 vector; + u32 polling_threshold_value; + u32 polling_threshold_window; + u32 error_threshold_value; + u32 error_threshold_window; +}; + +struct acpi_hest_ia_corrected { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +struct acpi_hest_ia_error_bank { + u8 bank_number; + u8 clear_status_on_init; + u8 status_format; + u8 reserved; + u32 control_register; + u64 control_data; + u32 status_register; + u32 address_register; + u32 misc_register; +} __attribute__((packed)); + +struct cppc_perf_caps { + u32 guaranteed_perf; + u32 highest_perf; + u32 nominal_perf; + u32 lowest_perf; + u32 lowest_nonlinear_perf; + u32 lowest_freq; + u32 nominal_freq; + u32 energy_perf; + bool auto_sel; +}; + +struct cpc_reg { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct cstate_entry { + struct { + unsigned int eax; + unsigned int ecx; + } states[8]; +}; + +struct acpi_processor_cx { + u8 valid; + u8 type; + u32 address; + u8 entry_method; + u8 index; + u32 latency; + u8 bm_sts_skip; + char desc[32]; +}; + +struct acpi_processor_flags { + u8 power : 1; + u8 performance : 1; + u8 throttling : 1; + u8 limit : 1; + u8 bm_control : 1; + u8 bm_check : 1; + u8 has_cst : 1; + u8 has_lpi : 1; + u8 power_setup_done : 1; + u8 bm_rld_set : 1; + u8 need_hotplug_init : 1; +}; + +struct acpi_power_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct machine_ops { + void (*restart)(char *); + void (*halt)(); + void (*power_off)(); + void (*shutdown)(); + void (*crash_shutdown)(struct pt_regs *); + void (*emergency_restart)(); +}; + +typedef void cpu_emergency_virt_cb(); + +typedef void (*nmi_shootdown_cb)(int, struct pt_regs *); + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +enum reboot_mode { + REBOOT_UNDEFINED = -1, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +enum allow_write_msrs { + MSR_WRITES_ON = 0, + 
MSR_WRITES_OFF = 1, + MSR_WRITES_DEFAULT = 2, +}; + +struct __call_single_data { + struct __call_single_node node; + smp_call_func_t func; + void *info; +}; + +typedef struct __call_single_data call_single_data_t; + +struct cpuid_regs { + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; +}; + +struct cpuid_regs_done { + struct cpuid_regs regs; + struct completion done; +}; + +struct chipset { + u32 vendor; + u32 device; + u32 class; + u32 class_mask; + u32 flags; + void (*f)(int, int, int); +}; + +struct intel_early_ops { + resource_size_t (*stolen_size)(int, int, int); + resource_size_t (*stolen_base)(int, int, int, resource_size_t); +}; + +struct smp_ops { + void (*smp_prepare_boot_cpu)(); + void (*smp_prepare_cpus)(unsigned int); + void (*smp_cpus_done)(unsigned int); + void (*stop_other_cpus)(int); + void (*crash_stop_other_cpus)(); + void (*smp_send_reschedule)(int); + void (*cleanup_dead_cpu)(unsigned int); + void (*poll_sync_state)(); + int (*kick_ap_alive)(unsigned int, struct task_struct *); + int (*cpu_disable)(); + void (*cpu_die)(unsigned int); + void (*play_dead)(); + void (*send_call_func_ipi)(const struct cpumask *); + void (*send_call_func_single_ipi)(int); +}; + +struct mwait_cpu_dead { + unsigned int control; + unsigned int status; +}; + +struct logical_maps { + u32 phys_pkg_id; + u32 phys_die_id; + u32 logical_pkg_id; + u32 logical_die_id; +}; + +typedef const struct cpumask *(*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); + +struct sched_group; + +struct sched_group_capacity; + +struct sched_domain; + +struct sched_domain_shared; + +struct sd_data { + struct sched_domain *__attribute__((btf_type_tag("percpu"))) * sd; + struct sched_domain_shared *__attribute__((btf_type_tag("percpu"))) * sds; + struct sched_group *__attribute__((btf_type_tag("percpu"))) * sg; + struct sched_group_capacity *__attribute__((btf_type_tag("percpu"))) * sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +struct sched_domain { + struct sched_domain __attribute__((btf_type_tag("rcu"))) * parent; + struct sched_domain __attribute__((btf_type_tag("rcu"))) * child; + struct sched_group *groups; + unsigned long min_interval; + unsigned long max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + unsigned int imb_numa_nr; + int nohz_idle; + int flags; + int level; + unsigned long last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + u64 max_newidle_lb_cost; + unsigned long last_decay_max_lb_cost; + unsigned int lb_count[3]; + unsigned int lb_failed[3]; + unsigned int lb_balanced[3]; + unsigned int lb_imbalance[3]; + unsigned int lb_gained[3]; + unsigned int lb_hot_gained[3]; + unsigned int lb_nobusyg[3]; + unsigned int lb_nobusyq[3]; + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned int span_weight; + unsigned long span[0]; +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; + int nr_idle_scan; +}; + +enum 
apic_intr_mode_id { + APIC_PIC = 0, + APIC_VIRTUAL_WIRE = 1, + APIC_VIRTUAL_WIRE_NO_CONFIG = 2, + APIC_SYMMETRIC_IO = 3, + APIC_SYMMETRIC_IO_NO_ROUTING = 4, +}; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_ASYM_CPUCAPACITY_FULL = 64, + SD_SHARE_CPUCAPACITY = 128, + SD_CLUSTER = 256, + SD_SHARE_PKG_RESOURCES = 512, + SD_SERIALIZE = 1024, + SD_ASYM_PACKING = 2048, + SD_PREFER_SIBLING = 4096, + SD_OVERLAP = 8192, + SD_NUMA = 16384, +}; + +struct physid_mask { + unsigned long mask[512]; +}; + +typedef struct physid_mask physid_mask_t; + +struct tsc_adjust { + s64 bootval; + s64 adjusted; + unsigned long nextcheck; + bool warned; +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +typedef int pcpu_fc_cpu_to_node_fn_t(int); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +struct mpc_cpu { + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char cpuflag; + unsigned int cpufeature; + unsigned int featureflag; + unsigned int reserved[2]; +}; + +struct mpc_bus { + unsigned char type; + unsigned char busid; + unsigned char bustype[6]; +}; + +struct mpc_ioapic { + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char flags; + unsigned int apicaddr; +}; + +struct mpc_lintsrc { + unsigned char type; + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbusid; + unsigned char srcbusirq; + unsigned char destapic; + unsigned char destapiclint; +}; + +struct mpf_intel { + char signature[4]; + unsigned int physptr; + unsigned char length; + unsigned char specification; + unsigned char checksum; + unsigned char feature1; + unsigned char feature2; + unsigned char feature3; + unsigned char feature4; + unsigned char feature5; +}; + +struct mpc_table { + char signature[4]; + unsigned short length; + char spec; + char checksum; + char oem[8]; + char productid[12]; + unsigned int oemptr; + unsigned short oemsize; + unsigned short oemcount; + unsigned int lapic; + unsigned int reserved; +}; + +enum { + X2APIC_OFF = 0, + X2APIC_DISABLED = 1, + X2APIC_ON = 2, + X2APIC_ON_LOCKED = 3, +}; + +enum apic_delivery_modes { + APIC_DELIVERY_MODE_FIXED = 0, + APIC_DELIVERY_MODE_LOWESTPRIO = 1, + APIC_DELIVERY_MODE_SMI = 2, + APIC_DELIVERY_MODE_NMI = 4, + APIC_DELIVERY_MODE_INIT = 5, + APIC_DELIVERY_MODE_EXTINT = 7, +}; + +enum { + IRQ_REMAP_XAPIC_MODE = 0, + IRQ_REMAP_X2APIC_MODE = 1, +}; + +union apic_ir { + unsigned long map[4]; + u32 regs[8]; +}; + +struct apic { + void (*eoi)(); + void (*native_eoi)(); + void (*write)(u32, u32); + u32 (*read)(u32); + void (*wait_icr_idle)(); + u32 (*safe_wait_icr_idle)(); + void (*send_IPI)(int, int); + void (*send_IPI_mask)(const struct cpumask *, int); + void (*send_IPI_mask_allbutself)(const struct cpumask *, int); + void (*send_IPI_allbutself)(int); + void (*send_IPI_all)(int); + void (*send_IPI_self)(int); + enum apic_delivery_modes delivery_mode; + u32 disable_esr : 1; + u32 dest_mode_logical : 1; + u32 x2apic_set_max_apicid : 1; + u32 nmi_to_offline_cpu : 1; + u32 (*calc_dest_apicid)(unsigned int); + u64 (*icr_read)(); + void (*icr_write)(u32, u32); + u32 max_apic_id; + int (*probe)(); + int (*acpi_madt_oem_check)(char *, char *); + bool (*apic_id_registered)(); + bool (*check_apicid_used)(physid_mask_t *, u32); + void 
(*init_apic_ldr)(); + void (*ioapic_phys_id_map)(physid_mask_t *, physid_mask_t *); + u32 (*cpu_present_to_apicid)(int); + u32 (*phys_pkg_id)(u32, int); + u32 (*get_apic_id)(u32); + u32 (*set_apic_id)(u32); + int (*wakeup_secondary_cpu)(u32, unsigned long); + int (*wakeup_secondary_cpu_64)(u32, unsigned long); + char *name; +}; + +struct irq_cfg { + unsigned int dest_apicid; + unsigned int vector; +}; + +struct vector_cleanup { + struct hlist_head head; + struct timer_list timer; +}; + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +enum { + X86_IRQ_ALLOC_LEGACY = 1, +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, + IRQD_AFFINITY_ON_ACTIVATE = 268435456, + IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, + IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, +}; + +struct apic_chip_data { + struct irq_cfg hw_irq_cfg; + unsigned int vector; + unsigned int prev_vector; + unsigned int cpu; + unsigned int prev_cpu; + unsigned int irq; + struct hlist_node clist; + unsigned int move_in_progress : 1; + unsigned int is_managed : 1; + unsigned int can_reserve : 1; + unsigned int has_reserved : 1; +}; + +struct cpumap; + +struct irq_matrix { + unsigned int matrix_bits; + unsigned int alloc_start; + unsigned int alloc_end; + unsigned int alloc_size; + unsigned int global_available; + unsigned int global_reserved; + unsigned int systembits_inalloc; + unsigned int total_allocated; + unsigned int online_maps; + struct cpumap __attribute__((btf_type_tag("percpu"))) * maps; + unsigned long scratch_map[4]; + unsigned long system_map[4]; +}; + +struct cpumap { + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int managed_allocated; + bool initialized; + bool online; + unsigned long alloc_map[4]; + unsigned long managed_map[4]; +}; + +struct apic_override { + void (*eoi)(); + void (*native_eoi)(); + void (*write)(u32, u32); + u32 (*read)(u32); + void (*send_IPI)(int, int); + void (*send_IPI_mask)(const struct cpumask *, int); + void (*send_IPI_mask_allbutself)(const struct cpumask *, int); + void (*send_IPI_allbutself)(int); + void (*send_IPI_all)(int); + void (*send_IPI_self)(int); + u64 (*icr_read)(); + void (*icr_write)(u32, u32); + int (*wakeup_secondary_cpu)(u32, unsigned long); + int (*wakeup_secondary_cpu_64)(u32, unsigned long); +}; + +struct mp_ioapic_gsi { + u32 gsi_base; + u32 gsi_end; +}; + +struct IO_APIC_route_entry; + +struct ioapic { + int nr_registers; + struct IO_APIC_route_entry *saved_registers; + struct mpc_ioapic mp_config; + struct mp_ioapic_gsi gsi_config; + struct ioapic_domain_cfg irqdomain_cfg; + struct irq_domain *irqdomain; + struct resource *iomem_res; +}; + +struct IO_APIC_route_entry { + union { + struct { + u64 vector : 8; + u64 delivery_mode : 3; + u64 dest_mode_logical : 1; + u64 delivery_status : 1; + u64 
active_low : 1; + u64 irr : 1; + u64 is_level : 1; + u64 masked : 1; + u64 reserved_0 : 15; + u64 reserved_1 : 17; + u64 virt_destid_8_14 : 7; + u64 destid_0_7 : 8; + }; + struct { + u64 ir_shared_0 : 8; + u64 ir_zero : 3; + u64 ir_index_15 : 1; + u64 ir_shared_1 : 5; + u64 ir_reserved_0 : 31; + u64 ir_format : 1; + u64 ir_index_0_14 : 15; + }; + struct { + u64 w1 : 32; + u64 w2 : 32; + }; + }; +}; + +struct irq_pin_list { + struct list_head list; + int apic; + int pin; +}; + +struct io_apic { + unsigned int index; + unsigned int unused[3]; + unsigned int data; + unsigned int unused2[11]; + unsigned int eoi; +}; + +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14; + u32 LTS : 1; + u32 delivery_type : 1; + u32 __reserved_1 : 8; + u32 ID : 8; + } bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8; + u32 __reserved_2 : 7; + u32 PRQ : 1; + u32 entries : 8; + u32 __reserved_1 : 8; + } bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24; + u32 arbitration : 4; + u32 __reserved_1 : 4; + } bits; +}; + +struct mp_chip_data { + struct list_head irq_2_pin; + struct IO_APIC_route_entry entry; + bool is_level; + bool active_low; + bool isa_irq; + u32 count; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1; + u32 __reserved_1 : 31; + } bits; +}; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, + IRQ_DOMAIN_FLAG_NO_MAP = 64, + IRQ_DOMAIN_FLAG_MSI_PARENT = 256, + IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_ACTIVATE_EARLY = 4, + MSI_FLAG_MUST_REACTIVATE = 8, + MSI_FLAG_DEV_SYSFS = 16, + MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, + MSI_FLAG_FREE_MSI_DESCS = 64, + MSI_GENERIC_FLAGS_MASK = 65535, + MSI_DOMAIN_FLAGS_MASK = 4294901760, + MSI_FLAG_MULTI_PCI_MSI = 65536, + MSI_FLAG_PCI_MSIX = 131072, + MSI_FLAG_LEVEL_CAPABLE = 262144, + MSI_FLAG_MSIX_CONTIGUOUS = 524288, + MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, + MSI_FLAG_PCI_IMS = 2097152, +}; + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, + IRQCHIP_AFFINITY_PRE_STARTUP = 1024, + IRQCHIP_IMMUTABLE = 2048, +}; + +enum { + FTRACE_UPDATE_IGNORE = 0, + FTRACE_UPDATE_MAKE_CALL = 1, + FTRACE_UPDATE_MODIFY_CALL = 2, + FTRACE_UPDATE_MAKE_NOP = 3, +}; + +enum { + FTRACE_OPS_FL_ENABLED = 1, + FTRACE_OPS_FL_DYNAMIC = 2, + FTRACE_OPS_FL_SAVE_REGS = 4, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, + FTRACE_OPS_FL_RECURSION = 16, + FTRACE_OPS_FL_STUB = 32, + FTRACE_OPS_FL_INITIALIZED = 64, + FTRACE_OPS_FL_DELETED = 128, + FTRACE_OPS_FL_ADDING = 256, + FTRACE_OPS_FL_REMOVING = 512, + FTRACE_OPS_FL_MODIFYING = 1024, + FTRACE_OPS_FL_ALLOC_TRAMP = 2048, + FTRACE_OPS_FL_IPMODIFY = 4096, + FTRACE_OPS_FL_PID = 8192, + FTRACE_OPS_FL_RCU = 16384, + FTRACE_OPS_FL_TRACE_ARRAY = 32768, + FTRACE_OPS_FL_PERMANENT = 65536, + FTRACE_OPS_FL_DIRECT = 131072, +}; + +enum { + FTRACE_FL_ENABLED = 2147483648, + FTRACE_FL_REGS = 1073741824, + FTRACE_FL_REGS_EN = 536870912, + FTRACE_FL_TRAMP = 268435456, + FTRACE_FL_TRAMP_EN = 134217728, + FTRACE_FL_IPMODIFY = 
67108864, + FTRACE_FL_DISABLED = 33554432, + FTRACE_FL_DIRECT = 16777216, + FTRACE_FL_DIRECT_EN = 8388608, + FTRACE_FL_CALL_OPS = 4194304, + FTRACE_FL_CALL_OPS_EN = 2097152, + FTRACE_FL_TOUCHED = 1048576, + FTRACE_FL_MODIFIED = 524288, +}; + +enum { + TRACE_FTRACE_BIT = 0, + TRACE_FTRACE_NMI_BIT = 1, + TRACE_FTRACE_IRQ_BIT = 2, + TRACE_FTRACE_SIRQ_BIT = 3, + TRACE_FTRACE_TRANSITION_BIT = 4, + TRACE_INTERNAL_BIT = 5, + TRACE_INTERNAL_NMI_BIT = 6, + TRACE_INTERNAL_IRQ_BIT = 7, + TRACE_INTERNAL_SIRQ_BIT = 8, + TRACE_INTERNAL_TRANSITION_BIT = 9, + TRACE_BRANCH_BIT = 10, + TRACE_IRQ_BIT = 11, + TRACE_GRAPH_BIT = 12, + TRACE_GRAPH_DEPTH_START_BIT = 13, + TRACE_GRAPH_DEPTH_END_BIT = 14, + TRACE_GRAPH_NOTRACE_BIT = 15, + TRACE_RECORD_RECURSION_BIT = 16, +}; + +enum { + TRACE_CTX_NMI = 0, + TRACE_CTX_IRQ = 1, + TRACE_CTX_SOFTIRQ = 2, + TRACE_CTX_NORMAL = 3, + TRACE_CTX_TRANSITION = 4, +}; + +struct dyn_arch_ftrace {}; + +struct dyn_ftrace { + unsigned long ip; + unsigned long flags; + struct dyn_arch_ftrace arch; +}; + +union ftrace_op_code_union { + char code[7]; + struct { + char op[3]; + int offset; + } __attribute__((packed)); +}; + +struct rethook; + +struct rethook_node { + struct callback_head rcu; + struct llist_node llist; + struct rethook *rethook; + unsigned long ret_addr; + unsigned long frame; +}; + +struct objpool_head; + +typedef int (*objpool_fini_cb)(struct objpool_head *, void *); + +struct objpool_slot; + +struct objpool_head { + int obj_size; + int nr_objs; + int nr_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + unsigned long flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +struct rethook { + void *data; + void (*handler)(struct rethook_node *, void *, unsigned long, struct pt_regs *); + struct objpool_head pool; + struct callback_head rcu; +}; + +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[0]; +}; + +typedef int kexec_probe_t(const char *, unsigned long); + +struct kimage; + +typedef void *kexec_load_t(struct kimage *, char *, unsigned long, char *, unsigned long, + char *, unsigned long); + +typedef int kexec_cleanup_t(void *); + +struct kexec_file_ops { + kexec_probe_t *probe; + kexec_load_t *load; + kexec_cleanup_t *cleanup; +}; + +typedef unsigned long kimage_entry_t; + +struct kexec_segment { + union { + void __attribute__((btf_type_tag("user"))) * buf; + void *kbuf; + }; + size_t bufsz; + unsigned long mem; + size_t memsz; +}; + +struct kimage_arch { + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; +}; + +struct elf64_hdr; + +typedef struct elf64_hdr Elf64_Ehdr; + +struct elf64_shdr; + +typedef struct elf64_shdr Elf64_Shdr; + +struct purgatory_info { + const Elf64_Ehdr *ehdr; + Elf64_Shdr *sechdrs; + void *purgatory_buf; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + unsigned long start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + unsigned long nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + unsigned long control_page; + unsigned int type : 1; + unsigned int preserve_context : 1; + unsigned int file_mode : 1; + struct kimage_arch arch; + void *kernel_buf; + unsigned long kernel_buf_len; + void *initrd_buf; + unsigned long initrd_buf_len; + char *cmdline_buf; + unsigned long cmdline_buf_len; + const struct kexec_file_ops *fops; + void 
*image_loader_data; + struct purgatory_info purgatory_info; + void *elf_headers; + unsigned long elf_headers_sz; + unsigned long elf_load_addr; +}; + +typedef __u64 Elf64_Off; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +struct elf64_shdr { + Elf64_Word sh_name; + Elf64_Word sh_type; + Elf64_Xword sh_flags; + Elf64_Addr sh_addr; + Elf64_Off sh_offset; + Elf64_Xword sh_size; + Elf64_Word sh_link; + Elf64_Word sh_info; + Elf64_Xword sh_addralign; + Elf64_Xword sh_entsize; +}; + +struct x86_mapping_info { + void *(*alloc_pgt_page)(void *); + void *context; + unsigned long page_flag; + unsigned long offset; + bool direct_gbpages; + unsigned long kernpg_flag; +}; + +struct init_pgtable_data { + struct x86_mapping_info *info; + pgd_t *level4p; +}; + +typedef __s64 Elf64_Sxword; + +struct elf64_rela { + Elf64_Addr r_offset; + Elf64_Xword r_info; + Elf64_Sxword r_addend; +}; + +typedef struct elf64_rela Elf64_Rela; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct range ranges[0]; +}; + +struct kexec_buf { + struct kimage *image; + void *buffer; + unsigned long bufsz; + unsigned long mem; + unsigned long memsz; + unsigned long buf_align; + unsigned long buf_min; + unsigned long buf_max; + bool top_down; +}; + +struct crash_memmap_data { + struct boot_params *params; + unsigned int type; +}; + +enum { + RNG_SEED_LENGTH = 32, +}; + +struct efi_setup_data { + u64 fw_vendor; + u64 __unused; + u64 tables; + u64 smbios; + u64 reserved[8]; +}; + +struct kexec_entry64_regs { + uint64_t rax; + uint64_t rcx; + uint64_t rdx; + uint64_t rbx; + uint64_t rsp; + uint64_t rbp; + uint64_t rsi; + uint64_t rdi; + uint64_t r8; + uint64_t r9; + uint64_t r10; + uint64_t r11; + uint64_t r12; + uint64_t r13; + uint64_t r14; + uint64_t r15; + uint64_t rip; +}; + +struct bzimage64_data { + void *bootparams_buf; +}; + +struct kretprobe_blackpoint { + const char *name; + void *addr; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; + unsigned long old_flags; + unsigned long saved_flags; +}; + +struct kprobe_ctlblk { + unsigned long kprobe_status; + unsigned long kprobe_old_flags; + unsigned long kprobe_saved_flags; + struct prev_kprobe prev_kprobe; +}; + +struct __arch_relative_insn { + u8 op; + s32 raddr; +} __attribute__((packed)); + +struct kprobe_insn_cache { + struct mutex mutex; + void *(*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +struct arch_optimized_insn { + kprobe_opcode_t copied_insn[4]; + kprobe_opcode_t *insn; + size_t size; +}; + +struct optimized_kprobe { + struct kprobe kp; + struct list_head list; + struct arch_optimized_insn optinsn; +}; + +enum mod_mem_type { + MOD_TEXT = 0, + MOD_DATA = 1, + MOD_RODATA = 2, + MOD_RO_AFTER_INIT = 3, + MOD_INIT_TEXT = 4, + MOD_INIT_DATA = 5, + MOD_INIT_RODATA = 6, + MOD_MEM_NUM_TYPES = 7, + MOD_INVALID = -1, +}; + +typedef unsigned int uint; + +struct nbcon_write_context; + +struct printk_buffers; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver *(*device)(struct console *, int *); + void (*unblank)(); + 
int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short flags; + short index; + int cflag; + uint ispeed; + uint ospeed; + u64 seq; + unsigned long dropped; + void *data; + struct hlist_node node; + bool (*write_atomic)(struct console *, struct nbcon_write_context *); + atomic_t nbcon_state; + atomic_long_t nbcon_seq; + struct printk_buffers *pbufs; +}; + +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL = 1, + NBCON_PRIO_EMERGENCY = 2, + NBCON_PRIO_PANIC = 3, + NBCON_PRIO_MAX = 4, +}; + +struct nbcon_context { + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + unsigned int allow_unsafe_takeover : 1; + unsigned int backlog : 1; + struct printk_buffers *pbufs; + u64 seq; +}; + +struct nbcon_write_context { + struct nbcon_context ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; +}; + +enum cons_flags { + CON_PRINTBUFFER = 1, + CON_CONSDEV = 2, + CON_ENABLED = 4, + CON_BOOT = 8, + CON_ANYTIME = 16, + CON_BRL = 32, + CON_EXTENDED = 64, + CON_SUSPENDED = 128, + CON_NBCON = 256, +}; + +struct hpet_channel; + +struct hpet_base { + unsigned int nr_channels; + unsigned int nr_clockevents; + unsigned int boot_cfg; + struct hpet_channel *channels; +}; + +enum hpet_mode { + HPET_MODE_UNUSED = 0, + HPET_MODE_LEGACY = 1, + HPET_MODE_CLOCKEVT = 2, + HPET_MODE_DEVICE = 3, +}; + +struct hpet_channel { + struct clock_event_device evt; + unsigned int num; + unsigned int cpu; + unsigned int irq; + unsigned int in_use; + enum hpet_mode mode; + unsigned int boot_cfg; + char name[10]; + long : 64; + long : 64; + long : 64; +}; + +typedef irqreturn_t (*rtc_irq_handler)(int, void *); + +union hpet_lock { + struct { + arch_spinlock_t lock; + u32 value; + }; + u64 lockval; +}; + +struct hpet_data { + unsigned long hd_phys_address; + void *hd_address; + unsigned short hd_nirqs; + unsigned int hd_state; + unsigned int hd_irq[32]; +}; + +struct amd_nb_bus_dev_range { + u8 bus; + u8 dev_base; + u8 dev_limit; +}; + +struct amd_northbridge_info { + u16 num; + u64 flags; + struct amd_northbridge *nb; +}; + +struct scan_area { + u64 addr; + u64 size; +}; + +enum memblock_flags { + MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, + MEMBLOCK_DRIVER_MANAGED = 8, + MEMBLOCK_RSRV_NOINIT = 16, +}; + +struct memblock_region; + +struct memblock_type { + unsigned long cnt; + unsigned long max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; + int nid; +}; + +struct arch_uprobe; + +struct uprobe_xol_ops { + bool (*emulate)(struct arch_uprobe *, struct pt_regs *); + int (*pre_xol)(struct arch_uprobe *, struct pt_regs *); + int (*post_xol)(struct arch_uprobe *, struct pt_regs *); + void (*abort)(struct arch_uprobe *, struct pt_regs *); +}; + +struct arch_uprobe { + union { + u8 insn[16]; + u8 ixol[16]; + }; + const struct uprobe_xol_ops *ops; + union { + struct { + s32 offs; + u8 ilen; + u8 opc1; + } branch; + struct { + u8 fixups; + u8 ilen; + } defparam; + struct { + u8 reg_offset; + u8 ilen; + } push; + }; +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +struct x86_perf_regs { + struct pt_regs regs; + u64 *xmm_regs; +}; + +struct __va_list_tag { + 
unsigned int gp_offset; + unsigned int fp_offset; + void *overflow_arg_area; + void *reg_save_area; +}; + +typedef __builtin_va_list va_list; + +struct va_format { + const char *fmt; + va_list *va; +}; + +struct ghcb_save_area { + u8 reserved_0x0[203]; + u8 cpl; + u8 reserved_0xcc[116]; + u64 xss; + u8 reserved_0x148[24]; + u64 dr7; + u8 reserved_0x168[16]; + u64 rip; + u8 reserved_0x180[88]; + u64 rsp; + u8 reserved_0x1e0[24]; + u64 rax; + u8 reserved_0x200[264]; + u64 rcx; + u64 rdx; + u64 rbx; + u8 reserved_0x320[8]; + u64 rbp; + u64 rsi; + u64 rdi; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u8 reserved_0x380[16]; + u64 sw_exit_code; + u64 sw_exit_info_1; + u64 sw_exit_info_2; + u64 sw_scratch; + u8 reserved_0x3b0[56]; + u64 xcr0; + u8 valid_bitmap[16]; + u64 x87_state_gpa; +}; + +struct ghcb { + struct ghcb_save_area save; + u8 reserved_save[1016]; + u8 shared_buffer[2032]; + u8 reserved_0xff0[10]; + u16 protocol_version; + u32 ghcb_usage; +}; + +struct sev_es_runtime_data { + struct ghcb ghcb_page; + struct ghcb backup_ghcb; + bool ghcb_active; + bool backup_ghcb_active; + unsigned long dr7; +}; + +struct sev_config { + __u64 debug : 1; + __u64 ghcbs_initialized : 1; + __u64 __reserved : 62; +}; + +struct snp_cpuid_fn { + u32 eax_in; + u32 ecx_in; + u64 xcr0_in; + u64 xss_in; + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; + u64 __reserved; +}; + +struct snp_cpuid_table { + u32 count; + u32 __reserved1; + u64 __reserved2; + struct snp_cpuid_fn fn[64]; +}; + +struct vmcb_seg { + u16 selector; + u16 attrib; + u32 limit; + u64 base; +}; + +struct sev_es_save_area { + struct vmcb_seg es; + struct vmcb_seg cs; + struct vmcb_seg ss; + struct vmcb_seg ds; + struct vmcb_seg fs; + struct vmcb_seg gs; + struct vmcb_seg gdtr; + struct vmcb_seg ldtr; + struct vmcb_seg idtr; + struct vmcb_seg tr; + u64 vmpl0_ssp; + u64 vmpl1_ssp; + u64 vmpl2_ssp; + u64 vmpl3_ssp; + u64 u_cet; + u8 reserved_0xc8[2]; + u8 vmpl; + u8 cpl; + u8 reserved_0xcc[4]; + u64 efer; + u8 reserved_0xd8[104]; + u64 xss; + u64 cr4; + u64 cr3; + u64 cr0; + u64 dr7; + u64 dr6; + u64 rflags; + u64 rip; + u64 dr0; + u64 dr1; + u64 dr2; + u64 dr3; + u64 dr0_addr_mask; + u64 dr1_addr_mask; + u64 dr2_addr_mask; + u64 dr3_addr_mask; + u8 reserved_0x1c0[24]; + u64 rsp; + u64 s_cet; + u64 ssp; + u64 isst_addr; + u64 rax; + u64 star; + u64 lstar; + u64 cstar; + u64 sfmask; + u64 kernel_gs_base; + u64 sysenter_cs; + u64 sysenter_esp; + u64 sysenter_eip; + u64 cr2; + u8 reserved_0x248[32]; + u64 g_pat; + u64 dbgctl; + u64 br_from; + u64 br_to; + u64 last_excp_from; + u64 last_excp_to; + u8 reserved_0x298[80]; + u32 pkru; + u32 tsc_aux; + u8 reserved_0x2f0[24]; + u64 rcx; + u64 rdx; + u64 rbx; + u64 reserved_0x320; + u64 rbp; + u64 rsi; + u64 rdi; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u8 reserved_0x380[16]; + u64 guest_exit_info_1; + u64 guest_exit_info_2; + u64 guest_exit_int_info; + u64 guest_nrip; + u64 sev_features; + u64 vintr_ctrl; + u64 guest_exit_code; + u64 virtual_tom; + u64 tlb_id; + u64 pcpu_id; + u64 event_inj; + u64 xcr0; + u8 reserved_0x3f0[16]; + u64 x87_dp; + u32 mxcsr; + u16 x87_ftw; + u16 x87_fsw; + u16 x87_fcw; + u16 x87_fop; + u16 x87_ds; + u16 x87_cs; + u64 x87_rip; + u8 fpreg_x87[80]; + u8 fpreg_xmm[256]; + u8 fpreg_ymm[256]; +}; + +enum es_result { + ES_OK = 0, + ES_UNSUPPORTED = 1, + ES_VMM_ERROR = 2, + ES_DECODE_FAILED = 3, + ES_EXCEPTION = 4, + ES_RETRY = 5, +}; + +enum { + SEV_RET_NO_FW_CALL = -1, + SEV_RET_SUCCESS = 0, + 
SEV_RET_INVALID_PLATFORM_STATE = 1, + SEV_RET_INVALID_GUEST_STATE = 2, + SEV_RET_INAVLID_CONFIG = 3, + SEV_RET_INVALID_LEN = 4, + SEV_RET_ALREADY_OWNED = 5, + SEV_RET_INVALID_CERTIFICATE = 6, + SEV_RET_POLICY_FAILURE = 7, + SEV_RET_INACTIVE = 8, + SEV_RET_INVALID_ADDRESS = 9, + SEV_RET_BAD_SIGNATURE = 10, + SEV_RET_BAD_MEASUREMENT = 11, + SEV_RET_ASID_OWNED = 12, + SEV_RET_INVALID_ASID = 13, + SEV_RET_WBINVD_REQUIRED = 14, + SEV_RET_DFFLUSH_REQUIRED = 15, + SEV_RET_INVALID_GUEST = 16, + SEV_RET_INVALID_COMMAND = 17, + SEV_RET_ACTIVE = 18, + SEV_RET_HWSEV_RET_PLATFORM = 19, + SEV_RET_HWSEV_RET_UNSAFE = 20, + SEV_RET_UNSUPPORTED = 21, + SEV_RET_INVALID_PARAM = 22, + SEV_RET_RESOURCE_LIMIT = 23, + SEV_RET_SECURE_DATA_INVALID = 24, + SEV_RET_INVALID_KEY = 39, + SEV_RET_MAX = 40, +}; + +enum insn_mmio_type { + INSN_MMIO_DECODE_FAILED = 0, + INSN_MMIO_WRITE = 1, + INSN_MMIO_WRITE_IMM = 2, + INSN_MMIO_READ = 3, + INSN_MMIO_READ_ZERO_EXTEND = 4, + INSN_MMIO_READ_SIGN_EXTEND = 5, + INSN_MMIO_MOVS = 6, +}; + +enum pg_level { + PG_LEVEL_NONE = 0, + PG_LEVEL_4K = 1, + PG_LEVEL_2M = 2, + PG_LEVEL_1G = 3, + PG_LEVEL_512G = 4, + PG_LEVEL_NUM = 5, +}; + +struct psc_hdr { + u16 cur_entry; + u16 end_entry; + u32 reserved; +}; + +struct psc_entry { + u64 cur_page : 12; + u64 gfn : 40; + u64 operation : 4; + u64 pagesize : 1; + u64 reserved : 7; +}; + +struct snp_psc_desc { + struct psc_hdr hdr; + struct psc_entry entries[64]; +}; + +struct secrets_os_area { + u32 msg_seqno_0; + u32 msg_seqno_1; + u32 msg_seqno_2; + u32 msg_seqno_3; + u64 ap_jump_table_pa; + u8 rsvd[40]; + u8 guest_usage[32]; +}; + +struct snp_secrets_page_layout { + u32 version; + u32 imien : 1; + u32 rsvd1 : 31; + u32 fms; + u32 rsvd2; + u8 gosvw[16]; + u8 vmpck0[32]; + u8 vmpck1[32]; + u8 vmpck2[32]; + u8 vmpck3[32]; + struct secrets_os_area os_area; + u8 rsvd3[3840]; +}; + +struct cc_blob_sev_info { + u32 magic; + u16 version; + u16 reserved; + u64 secrets_phys; + u32 secrets_len; + u32 rsvd1; + u64 cpuid_phys; + u32 cpuid_len; + u32 rsvd2; +}; + +struct cc_setup_data { + struct setup_data header; + u32 cc_blob_address; +}; + +struct cpuid_leaf { + u32 fn; + u32 subfn; + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; +}; + +struct es_fault_info { + unsigned long vector; + unsigned long error_code; + unsigned long cr2; +}; + +struct es_em_ctxt { + struct pt_regs *regs; + struct insn insn; + struct es_fault_info fi; +}; + +struct ghcb_state { + struct ghcb *ghcb; +}; + +struct snp_req_data { + unsigned long req_gpa; + unsigned long resp_gpa; + unsigned long data_gpa; + unsigned int data_npages; +}; + +struct snp_guest_request_ioctl { + __u8 msg_version; + __u64 req_data; + __u64 resp_data; + union { + __u64 exitinfo2; + struct { + __u32 fw_error; + __u32 vmm_error; + }; + }; +}; + +struct sev_guest_platform_data { + u64 secrets_gpa; +}; + +enum cp_error_code { + CP_EC = 32767, + CP_RET = 1, + CP_IRET = 2, + CP_ENDBR = 3, + CP_RSTRORSSP = 4, + CP_SETSSBSY = 5, + CP_ENCL = 32768, +}; + +enum auditsc_class_t { + AUDITSC_NATIVE = 0, + AUDITSC_COMPAT = 1, + AUDITSC_OPEN = 2, + AUDITSC_OPENAT = 3, + AUDITSC_SOCKETCALL = 4, + AUDITSC_EXECVE = 5, + AUDITSC_OPENAT2 = 6, + AUDITSC_NVALS = 7, +}; + +struct pci_hostbridge_probe { + u32 bus; + u32 slot; + u32 vendor; + u32 device; +}; + +typedef u16 uint16_t; + +typedef u8 uint8_t; + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +struct map_range { + unsigned long start; + unsigned long end; + unsigned int page_size_mask; +}; + +struct kcore_list { + struct list_head list; 
+ unsigned long addr; + size_t size; + int type; +}; + +enum kcore_type { + KCORE_TEXT = 0, + KCORE_VMALLOC = 1, + KCORE_RAM = 2, + KCORE_VMEMMAP = 3, + KCORE_USER = 4, +}; + +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = 12, + MIX_SECTION_INFO = 13, + NODE_INFO = 14, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = 14, +}; + +struct mhp_params { + struct vmem_altmap *altmap; + pgprot_t pgprot; + struct dev_pagemap *pgmap; +}; + +typedef void (*btf_trace_page_fault_user)(void *, unsigned long, struct pt_regs *, + unsigned long); + +typedef void (*btf_trace_page_fault_kernel)(void *, unsigned long, struct pt_regs *, + unsigned long); + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +struct trace_event_raw_x86_exceptions { + struct trace_entry ent; + unsigned long address; + unsigned long ip; + unsigned long error_code; + char __data[0]; +}; + +struct trace_event_data_offsets_x86_exceptions {}; + +enum { + IORES_MAP_SYSTEM_RAM = 1, + IORES_MAP_ENCRYPTED = 2, +}; + +enum { + SECTION_MARKED_PRESENT_BIT = 0, + SECTION_HAS_MEM_MAP_BIT = 1, + SECTION_IS_ONLINE_BIT = 2, + SECTION_IS_EARLY_BIT = 3, + SECTION_TAINT_ZONE_DEVICE_BIT = 4, + SECTION_MAP_LAST_BIT = 5, +}; + +struct mem_section_usage { + struct callback_head rcu; + unsigned long subsection_map[1]; + unsigned long pageblock_flags[0]; +}; + +struct ioremap_desc { + unsigned int flags; +}; + +struct mem_section { + unsigned long section_mem_map; + struct mem_section_usage *usage; +}; + +struct va_alignment { + int flags; + unsigned long mask; + unsigned long bits; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_THROTTLED_WRITTEN = 33, + NR_KERNEL_MISC_RECLAIMABLE = 34, + NR_FOLL_PIN_ACQUIRED = 35, + NR_FOLL_PIN_RELEASED = 36, + NR_KERNEL_STACK_KB = 37, + NR_PAGETABLE = 38, + NR_SECONDARY_PAGETABLE = 39, + NR_SWAPCACHE = 40, + NR_VM_NODE_STAT_ITEMS = 41, +}; + +struct ptdesc { + unsigned long __page_flags; + union { + struct callback_head pt_rcu_head; + struct list_head pt_list; + struct { + unsigned long _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + unsigned long __page_mapping; + union { + struct mm_struct *pt_mm; + atomic_t 
pt_frag_refcount; + }; + union { + unsigned long _pt_pad_2; + spinlock_t ptl; + }; + unsigned int __page_type; + atomic_t _refcount; + unsigned long pt_memcg_data; +}; + +struct hstate { + struct mutex resize_lock; + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; + unsigned int demote_order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_activelist; + struct list_head hugepage_freelists[64]; + unsigned int max_huge_pages_node[64]; + unsigned int nr_huge_pages_node[64]; + unsigned int free_huge_pages_node[64]; + unsigned int surplus_huge_pages_node[64]; + struct cftype cgroup_files_dfl[8]; + struct cftype cgroup_files_legacy[10]; + char name[32]; +}; + +struct hugepage_subpool; + +struct hugetlbfs_sb_info { + long max_inodes; + long free_inodes; + spinlock_t stat_lock; + struct hstate *hstate; + struct hugepage_subpool *spool; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +struct hugepage_subpool { + spinlock_t lock; + long count; + long max_hpages; + long used_hpages; + struct hstate *hstate; + long min_hpages; + long rsv_hpages; +}; + +struct tlb_state_shared { + bool is_lazy; +}; + +struct flush_tlb_info { + struct mm_struct *mm; + unsigned long start; + unsigned long end; + u64 new_tlb_gen; + unsigned int initiating_cpu; + u8 stride_shift; + u8 freed_tables; +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH = 0, + TLB_REMOTE_SHOOTDOWN = 1, + TLB_LOCAL_SHOOTDOWN = 2, + TLB_LOCAL_MM_SHOOTDOWN = 3, + TLB_REMOTE_SEND_IPI = 4, + NR_TLB_FLUSH_REASONS = 5, +}; + +struct exception_stacks { + char DF_stack_guard[0]; + char DF_stack[8192]; + char NMI_stack_guard[0]; + char NMI_stack[8192]; + char DB_stack_guard[0]; + char DB_stack[8192]; + char MCE_stack_guard[0]; + char MCE_stack[8192]; + char VC_stack_guard[0]; + char VC_stack[8192]; + char VC2_stack_guard[0]; + char VC2_stack[8192]; + char IST_top_guard[0]; +}; + +enum cpa_warn { + CPA_CONFLICT = 0, + CPA_PROTECT = 1, + CPA_DETECT = 2, +}; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_DMA = 4, + PGALLOC_DMA32 = 5, + PGALLOC_NORMAL = 6, + PGALLOC_MOVABLE = 7, + PGALLOC_DEVICE = 8, + ALLOCSTALL_DMA = 9, + ALLOCSTALL_DMA32 = 10, + ALLOCSTALL_NORMAL = 11, + ALLOCSTALL_MOVABLE = 12, + ALLOCSTALL_DEVICE = 13, + PGSCAN_SKIP_DMA = 14, + PGSCAN_SKIP_DMA32 = 15, + PGSCAN_SKIP_NORMAL = 16, + PGSCAN_SKIP_MOVABLE = 17, + PGSCAN_SKIP_DEVICE = 18, + PGFREE = 19, + PGACTIVATE = 20, + PGDEACTIVATE = 21, + PGLAZYFREE = 22, + PGFAULT = 23, + PGMAJFAULT = 24, + PGLAZYFREED = 25, + PGREFILL = 26, + PGREUSE = 27, + PGSTEAL_KSWAPD = 28, + PGSTEAL_DIRECT = 29, + PGSTEAL_KHUGEPAGED = 30, + PGDEMOTE_KSWAPD = 31, + PGDEMOTE_DIRECT = 32, + PGDEMOTE_KHUGEPAGED = 33, + PGSCAN_KSWAPD = 34, + PGSCAN_DIRECT = 35, + PGSCAN_KHUGEPAGED = 36, + PGSCAN_DIRECT_THROTTLE = 37, + PGSCAN_ANON = 38, + PGSCAN_FILE = 39, + PGSTEAL_ANON = 40, + PGSTEAL_FILE = 41, + PGSCAN_ZONE_RECLAIM_FAILED = 42, + PGINODESTEAL = 43, + SLABS_SCANNED = 44, + KSWAPD_INODESTEAL = 45, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 46, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 47, + PAGEOUTRUN = 48, + PGROTATED = 49, + DROP_PAGECACHE = 50, + DROP_SLAB = 51, + OOM_KILL = 52, + PGMIGRATE_SUCCESS = 53, + PGMIGRATE_FAIL = 54, + THP_MIGRATION_SUCCESS = 55, + THP_MIGRATION_FAIL = 56, + THP_MIGRATION_SPLIT = 57, + COMPACTMIGRATE_SCANNED = 58, + 
COMPACTFREE_SCANNED = 59, + COMPACTISOLATED = 60, + COMPACTSTALL = 61, + COMPACTFAIL = 62, + COMPACTSUCCESS = 63, + KCOMPACTD_WAKE = 64, + KCOMPACTD_MIGRATE_SCANNED = 65, + KCOMPACTD_FREE_SCANNED = 66, + HTLB_BUDDY_PGALLOC = 67, + HTLB_BUDDY_PGALLOC_FAIL = 68, + UNEVICTABLE_PGCULLED = 69, + UNEVICTABLE_PGSCANNED = 70, + UNEVICTABLE_PGRESCUED = 71, + UNEVICTABLE_PGMLOCKED = 72, + UNEVICTABLE_PGMUNLOCKED = 73, + UNEVICTABLE_PGCLEARED = 74, + UNEVICTABLE_PGSTRANDED = 75, + THP_FAULT_ALLOC = 76, + THP_FAULT_FALLBACK = 77, + THP_FAULT_FALLBACK_CHARGE = 78, + THP_COLLAPSE_ALLOC = 79, + THP_COLLAPSE_ALLOC_FAILED = 80, + THP_FILE_ALLOC = 81, + THP_FILE_FALLBACK = 82, + THP_FILE_FALLBACK_CHARGE = 83, + THP_FILE_MAPPED = 84, + THP_SPLIT_PAGE = 85, + THP_SPLIT_PAGE_FAILED = 86, + THP_DEFERRED_SPLIT_PAGE = 87, + THP_SPLIT_PMD = 88, + THP_SCAN_EXCEED_NONE_PTE = 89, + THP_SCAN_EXCEED_SWAP_PTE = 90, + THP_SCAN_EXCEED_SHARED_PTE = 91, + THP_SPLIT_PUD = 92, + THP_ZERO_PAGE_ALLOC = 93, + THP_ZERO_PAGE_ALLOC_FAILED = 94, + THP_SWPOUT = 95, + THP_SWPOUT_FALLBACK = 96, + BALLOON_INFLATE = 97, + BALLOON_DEFLATE = 98, + BALLOON_MIGRATE = 99, + SWAP_RA = 100, + SWAP_RA_HIT = 101, + KSM_SWPIN_COPY = 102, + COW_KSM = 103, + ZSWPIN = 104, + ZSWPOUT = 105, + DIRECT_MAP_LEVEL2_SPLIT = 106, + DIRECT_MAP_LEVEL3_SPLIT = 107, + NR_VM_EVENT_ITEMS = 108, +}; + +struct cpa_data { + unsigned long *vaddr; + pgd_t *pgd; + pgprot_t mask_set; + pgprot_t mask_clr; + unsigned long numpages; + unsigned long curpage; + unsigned long pfn; + unsigned int flags; + unsigned int force_split : 1; + unsigned int force_static_prot : 1; + unsigned int force_flush_all : 1; + struct page **pages; +}; + +enum { + PAT_UC = 0, + PAT_WC = 1, + PAT_WT = 4, + PAT_WP = 5, + PAT_WB = 6, + PAT_UC_MINUS = 7, +}; + +struct memtype { + u64 start; + u64 end; + u64 subtree_max_end; + enum page_cache_mode type; + struct rb_node rb; +}; + +struct pagerange_state { + unsigned long cur_pfn; + int ram; + int not_ram; +}; + +typedef struct { + u64 val; +} pfn_t; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +enum { + MEMTYPE_EXACT_MATCH = 0, + MEMTYPE_END_MATCH = 1, +}; + +struct addr_marker { + unsigned long start_address; + const char *name; + unsigned long max_lines; +}; + +enum address_markers_idx { + USER_SPACE_NR = 0, + KERNEL_SPACE_NR = 1, + LDT_NR = 2, + LOW_KERNEL_NR = 3, + VMALLOC_START_NR = 4, + VMEMMAP_START_NR = 5, + CPU_ENTRY_AREA_NR = 6, + ESPFIX_START_NR = 7, + EFI_END_NR = 8, + HIGH_KERNEL_NR = 9, + MODULES_VADDR_NR = 10, + MODULES_END_NR = 11, + FIXADDR_START_NR = 12, + END_OF_SPACE_NR = 13, +}; + +struct ptdump_range; + +struct ptdump_state { + void (*note_page)(struct ptdump_state *, unsigned long, int, u64); + void (*effective_prot)(struct ptdump_state *, int, u64); + const struct ptdump_range *range; +}; + +struct pg_state { + struct ptdump_state ptdump; + int level; + pgprotval_t current_prot; + pgprotval_t effective_prot; + pgprotval_t prot_levels[5]; + unsigned long start_address; + const struct addr_marker *marker; + unsigned long lines; + bool to_dmesg; + bool check_wx; + unsigned long wx_pages; + struct seq_file *seq; +}; + +struct ptdump_range { + unsigned long start; + unsigned long end; +}; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[128]; +}; + +enum uv_system_type { + UV_NONE = 0, + 
UV_LEGACY_APIC = 1, + UV_X2APIC = 2, +}; + +struct acpi_srat_x2apic_cpu_affinity { + struct acpi_subtable_header header; + u16 reserved; + u32 proximity_domain; + u32 apic_id; + u32 flags; + u32 clock_domain; + u32 reserved2; +}; + +struct acpi_srat_cpu_affinity { + struct acpi_subtable_header header; + u8 proximity_domain_lo; + u8 apic_id; + u32 flags; + u8 local_sapic_eid; + u8 proximity_domain_hi[3]; + u32 clock_domain; +}; + +struct kaslr_memory_region { + unsigned long *base; + unsigned long size_tb; +}; + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +enum pti_mode { + PTI_AUTO = 0, + PTI_FORCE_OFF = 1, + PTI_FORCE_ON = 2, +}; + +enum pti_clone_level { + PTI_CLONE_PMD = 0, + PTI_CLONE_PTE = 1, +}; + +struct sme_populate_pgd_data { + void *pgtable_area; + pgd_t *pgd; + pmdval_t pmd_flags; + pteval_t pte_flags; + unsigned long paddr; + unsigned long vaddr; + unsigned long vaddr_end; +}; + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[64]; + unsigned int buflen; + unsigned int outlen; +}; + +struct efi_memory_map_data { + phys_addr_t phys_map; + unsigned long size; + unsigned long desc_version; + unsigned long desc_size; + unsigned long flags; +}; + +typedef struct { + u32 type; + u32 pad; + u64 phys_addr; + u64 virt_addr; + u64 num_pages; + u64 attribute; +} efi_memory_desc_t; + +struct efi_memory_map { + phys_addr_t phys_map; + void *map; + void *map_end; + int nr_map; + unsigned long desc_version; + unsigned long desc_size; + unsigned long flags; +}; + +struct efi_mem_range { + struct range range; + u64 attribute; +}; + +typedef u16 efi_char16_t; + +enum efi_rts_ids { + EFI_NONE = 0, + EFI_GET_TIME = 1, + EFI_SET_TIME = 2, + EFI_GET_WAKEUP_TIME = 3, + EFI_SET_WAKEUP_TIME = 4, + EFI_GET_VARIABLE = 5, + EFI_GET_NEXT_VARIABLE = 6, + EFI_SET_VARIABLE = 7, + EFI_QUERY_VARIABLE_INFO = 8, + EFI_GET_NEXT_HIGH_MONO_COUNT = 9, + EFI_RESET_SYSTEM = 10, + EFI_UPDATE_CAPSULE = 11, + EFI_QUERY_CAPSULE_CAPS = 12, + EFI_ACPI_PRM_HANDLER = 13, +}; + +typedef guid_t efi_guid_t; + +typedef struct { + efi_guid_t guid; + u64 table; +} efi_config_table_64_t; + +typedef unsigned long efi_status_t; + +typedef struct { + efi_guid_t guid; + unsigned long *ptr; + const char name[16]; +} efi_config_table_type_t; + +typedef struct { + u64 signature; + u32 revision; + u32 headersize; + u32 crc32; + u32 reserved; +} efi_table_hdr_t; + +typedef struct { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 pad1; + u32 nanosecond; + s16 timezone; + u8 daylight; + u8 pad2; +} efi_time_t; + +typedef struct { + u32 resolution; + u32 accuracy; + u8 sets_to_zero; +} efi_time_cap_t; + +typedef u8 efi_bool_t; + +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 get_time; + u32 set_time; + u32 get_wakeup_time; + u32 set_wakeup_time; + u32 set_virtual_address_map; + u32 convert_pointer; + u32 get_variable; + u32 get_next_variable; + u32 set_variable; + u32 get_next_high_mono_count; + u32 reset_system; + u32 update_capsule; + u32 query_capsule_caps; + u32 query_variable_info; +} efi_runtime_services_32_t; + +typedef union { + struct { + efi_table_hdr_t hdr; + efi_status_t (*get_time)(efi_time_t *, efi_time_cap_t *); + 
efi_status_t (*set_time)(efi_time_t *); + efi_status_t (*get_wakeup_time)(efi_bool_t *, efi_bool_t *, efi_time_t *); + efi_status_t (*set_wakeup_time)(efi_bool_t, efi_time_t *); + efi_status_t (*set_virtual_address_map)(unsigned long, unsigned long, u32, + efi_memory_desc_t *); + void *convert_pointer; + efi_status_t (*get_variable)(efi_char16_t *, efi_guid_t *, u32 *, + unsigned long *, void *); + efi_status_t (*get_next_variable)(unsigned long *, efi_char16_t *, + efi_guid_t *); + efi_status_t (*set_variable)(efi_char16_t *, efi_guid_t *, u32, + unsigned long, void *); + efi_status_t (*get_next_high_mono_count)(u32 *); + void (*reset_system)(int, efi_status_t, unsigned long, efi_char16_t *); + efi_status_t (*update_capsule)(efi_capsule_header_t **, unsigned long, + unsigned long); + efi_status_t (*query_capsule_caps)(efi_capsule_header_t **, unsigned long, + u64 *, int *); + efi_status_t (*query_variable_info)(u32, u64 *, u64 *, u64 *); + }; + efi_runtime_services_32_t mixed_mode; +} efi_runtime_services_t; + +typedef struct { + efi_guid_t guid; + u32 table; +} efi_config_table_32_t; + +typedef union { + struct { + efi_guid_t guid; + void *table; + }; + efi_config_table_32_t mixed_mode; +} efi_config_table_t; + +typedef struct { + u32 version; + u32 length; + u64 memory_protection_attribute; +} efi_properties_table_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 fw_vendor; + u32 fw_revision; + u32 con_in_handle; + u32 con_in; + u32 con_out_handle; + u32 con_out; + u32 stderr_handle; + u32 stderr; + u32 runtime; + u32 boottime; + u32 nr_tables; + u32 tables; +} efi_system_table_32_t; + +typedef struct { + efi_table_hdr_t hdr; + u64 fw_vendor; + u32 fw_revision; + u32 __pad1; + u64 con_in_handle; + u64 con_in; + u64 con_out_handle; + u64 con_out; + u64 stderr_handle; + u64 stderr; + u64 runtime; + u64 boottime; + u32 nr_tables; + u32 __pad2; + u64 tables; +} efi_system_table_64_t; + +union efi_simple_text_input_protocol; + +typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; + +union efi_simple_text_output_protocol; + +typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; + +union efi_boot_services; + +typedef union efi_boot_services efi_boot_services_t; + +typedef union { + struct { + efi_table_hdr_t hdr; + unsigned long fw_vendor; + u32 fw_revision; + unsigned long con_in_handle; + efi_simple_text_input_protocol_t *con_in; + unsigned long con_out_handle; + efi_simple_text_output_protocol_t *con_out; + unsigned long stderr_handle; + unsigned long stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + unsigned long nr_tables; + unsigned long tables; + }; + efi_system_table_32_t mixed_mode; +} efi_system_table_t; + +typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool); + +typedef u16 ucs2_char_t; + +struct efi_runtime_map_entry { + efi_memory_desc_t md; + struct kobject kobj; +}; + +struct map_attribute { + struct attribute attr; + ssize_t (*show)(struct efi_runtime_map_entry *, char *); +}; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head slot_list; + struct pci_slot *pci_slot; + struct module *owner; + const char *mod_name; +}; + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int 
(*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, bool); +}; + +struct rcec_ea { + u8 nextbusn; + u8 lastbusn; + u32 bitmap; +}; + +struct pci_sriov { + int pos; + int nres; + u32 cap; + u16 ctrl; + u16 total_VFs; + u16 initial_VFs; + u16 num_VFs; + u16 offset; + u16 stride; + u16 vf_device; + u32 pgsz; + u8 link; + u8 max_VF_buses; + u16 driver_max_VFs; + struct pci_dev *dev; + struct pci_dev *self; + u32 class; + u8 hdr_type; + u16 subsystem_vendor; + u16 subsystem_device; + resource_size_t barsz[6]; + bool drivers_autoprobe; +}; + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + MAX_BPF_LINK_TYPE = 14, +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +struct bpf_array_aux; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + union { + struct { + struct { + } __empty_value; + char value[0]; + }; + struct { + struct { + } __empty_ptrs; + void *ptrs[0]; + }; + struct { + struct { + } __empty_pptrs; + void __attribute__((btf_type_tag("percpu"))) * pptrs[0]; + }; + }; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_array_aux { + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_tramp_link; + +struct bpf_tramp_links { + struct bpf_tramp_link *links[38]; + int nr_links; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_tramp_link { + struct bpf_link link; + struct hlist_node tramp_hlist; + u64 cookie; +}; + +struct bpf_link_info; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); + int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 
attach_type; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + union { + struct { + __u64 cgroup_id; + __u32 order; + } cgroup; + struct { + __u32 tid; + __u32 pid; + } task; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + struct { + __u32 map_id; + } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + __u64 addrs; + __u32 count; + __u32 flags; + __u64 missed; + } kprobe_multi; + struct { + __u32 type; + union { + struct { + __u64 file_name; + __u32 name_len; + __u32 offset; + } uprobe; + struct { + __u64 func_name; + __u32 name_len; + __u32 offset; + __u64 addr; + __u64 missed; + } kprobe; + struct { + __u64 tp_name; + __u32 name_len; + } tracepoint; + struct { + __u64 config; + __u32 type; + } event; + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; + }; +}; + +struct jit_context { + int cleanup_addr; + int tail_call_direct_label; + int tail_call_indirect_label; +}; + +struct bpf_binary_header { + u32 size; + long : 0; + u8 image[0]; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +struct bpf_tramp_run_ctx; + +typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); + +struct bpf_tramp_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + struct bpf_run_ctx *saved_run_ctx; +}; + +typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); + +struct x64_jit_data { + struct bpf_binary_header *rw_header; + struct bpf_binary_header *header; + int *addrs; + u8 *image; + int proglen; + struct jit_context ctx; +}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, unsigned long); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 9, + UCOUNT_FANOTIFY_GROUPS = 10, + UCOUNT_FANOTIFY_MARKS = 11, + UCOUNT_COUNTS = 12, +}; + +enum rlimit_type { + UCOUNT_RLIMIT_NPROC = 0, + UCOUNT_RLIMIT_MSGQUEUE = 1, + UCOUNT_RLIMIT_SIGPENDING = 2, + UCOUNT_RLIMIT_MEMLOCK = 3, + UCOUNT_RLIMIT_COUNTS = 4, +}; + +enum { + TASK_COMM_LEN = 16, +}; + +enum mm_cid_state { + MM_CID_UNSET = 4294967295, + MM_CID_LAZY_PUT = 2147483648, +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + 
FUTEX_STATE_DEAD = 2, +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + unsigned long clone_flags; + short oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short oom_score_adj; + char __data[0]; +}; + +struct vm_stack { + struct callback_head rcu; + struct vm_struct *stack_vm_area; +}; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +typedef void (*rcu_callback_t)(struct callback_head *); + +typedef struct poll_table_struct poll_table; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +struct task_group { + struct cgroup_subsys_state css; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + unsigned long shares; +}; + +struct io_ring_ctx; + +struct io_wq; + +struct io_uring_task { + int cached_refs; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct file *registered_rings[16]; + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_cancel; + atomic_t inflight_tracked; + struct percpu_counter inflight; + long : 64; + long : 64; + struct { + struct llist_head task_list; + struct callback_head task_work; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; +}; + +struct io_fixed_file; + +struct io_file_table { + struct io_fixed_file *files; + unsigned long *bitmap; + unsigned int alloc_hint; +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_kiocb; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct io_wq_work_node free_list; + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + bool plug_started; + bool need_plug; + unsigned short submit_nr; + unsigned int cqes_count; + struct blk_plug plug; +}; + +struct io_hash_bucket; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned int hash_bits; +}; + +struct io_alloc_cache { + struct io_wq_work_node list; + unsigned int nr_cached; + unsigned int max_cached; + size_t elem_size; +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 res; + __u32 flags; + __u64 big_cqe[0]; +}; + +struct io_restriction { + unsigned long register_op[1]; + unsigned long sqe_op[1]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_rings; + +struct io_rsrc_node; + +struct io_mapped_ubuf; + +struct io_buffer_list; + +struct io_ev_fd; + +struct io_sq_data; + +struct io_rsrc_data; + +struct io_wq_hash; + +struct io_ring_ctx { + struct { + unsigned int flags; + unsigned int drain_next : 1; + unsigned int restricted : 1; + unsigned int off_timeout_used : 1; + unsigned int 
drain_active : 1; + unsigned int has_evfd : 1; + unsigned int task_complete : 1; + unsigned int lockless_cq : 1; + unsigned int syscall_iopoll : 1; + unsigned int poll_activated : 1; + unsigned int drain_disabled : 1; + unsigned int compat : 1; + struct task_struct *submitter_task; + struct io_rings *rings; + struct percpu_ref refs; + enum task_work_notify_mode notify_method; + long : 64; + long : 64; + }; + struct { + struct mutex uring_lock; + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned int cached_sq_head; + unsigned int sq_entries; + struct io_rsrc_node *rsrc_node; + atomic_t cancel_seq; + struct io_file_table file_table; + unsigned int nr_user_files; + unsigned int nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + struct io_submit_state submit_state; + struct io_buffer_list *io_bl; + struct xarray io_bl_xa; + struct io_hash_table cancel_table_locked; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + struct io_wq_work_list iopoll_list; + bool poll_multi_queue; + struct hlist_head cancelable_uring_cmd; + }; + struct { + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + unsigned int cached_cq_tail; + unsigned int cq_entries; + struct io_ev_fd __attribute__((btf_type_tag("rcu"))) * io_ev_fd; + unsigned int cq_extra; + long : 64; + long : 64; + long : 64; + }; + struct { + struct llist_head work_llist; + unsigned long check_cq; + atomic_t cq_wait_nr; + atomic_t cq_timeouts; + struct wait_queue_head cq_wait; + long : 64; + long : 64; + }; + struct { + spinlock_t timeout_lock; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned int cq_last_tm_flush; + long : 64; + long : 64; + }; + struct io_uring_cqe completion_cqes[16]; + spinlock_t completion_lock; + struct io_wq_work_list locked_free_list; + unsigned int locked_free_nr; + struct list_head io_buffers_comp; + struct list_head cq_overflow_list; + struct io_hash_table cancel_table; + struct hlist_head waitid_list; + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; + const struct cred *sq_creds; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + unsigned int file_alloc_start; + unsigned int file_alloc_end; + struct xarray personalities; + u32 pers_next; + struct list_head io_buffers_cache; + struct hlist_head io_buf_list; + struct wait_queue_head poll_wq; + struct io_restriction restrictions; + struct io_mapped_ubuf *dummy_ubuf; + struct io_rsrc_data *file_data; + struct io_rsrc_data *buf_data; + struct list_head rsrc_ref_list; + struct io_alloc_cache rsrc_node_cache; + struct wait_queue_head rsrc_quiesce_wq; + unsigned int rsrc_quiesce; + struct socket *ring_sock; + struct io_wq_hash *hash_map; + struct user_struct *user; + struct mm_struct *mm_account; + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + bool iowq_limits_set; + struct callback_head poll_wq_task_work; + struct list_head defer_list; + unsigned int sq_thread_idle; + unsigned int evfd_last_cq_tail; + unsigned short n_ring_pages; + unsigned short n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct io_uring { + u32 head; + u32 tail; +}; + +struct io_rings { + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 
sq_dropped; + atomic_t sq_flags; + u32 cq_flags; + u32 cq_overflow; + long : 64; + long : 64; + struct io_uring_cqe cqes[0]; +}; + +struct io_fixed_file { + unsigned long file_ptr; +}; + +struct io_cmd_data { + struct file *file; + __u8 data[56]; +}; + +struct io_cqe { + __u64 user_data; + __s32 res; + union { + __u32 flags; + int fd; + }; +}; + +struct io_tw_state; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_wq_work { + struct io_wq_work_node list; + unsigned int flags; + int cancel_seq; +}; + +struct io_buffer; + +struct async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_cmd_data cmd; + }; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + unsigned int flags; + struct io_cqe cqe; + struct io_ring_ctx *ctx; + struct task_struct *task; + struct io_rsrc_node *rsrc_node; + union { + struct io_mapped_ubuf *imu; + struct io_buffer *kbuf; + struct io_buffer_list *buf_list; + }; + union { + struct io_wq_work_node comp_list; + __poll_t apoll_events; + }; + atomic_t refs; + atomic_t poll_refs; + struct io_task_work io_task_work; + unsigned int nr_tw; + struct hlist_node hash_node; + struct async_poll *apoll; + void *async_data; + struct io_kiocb *link; + const struct cred *creds; + struct io_wq_work work; + struct { + u64 extra1; + u64 extra2; + } big_cqe; +}; + +struct io_tw_state { + bool locked; +}; + +struct io_hash_bucket { + spinlock_t lock; + struct hlist_head list; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async : 1; + struct callback_head rcu; + atomic_t refs; + atomic_t ops; +}; + +struct io_wq_hash { + refcount_t refs; + unsigned long map; + struct wait_queue_head wait; +}; + +struct key_preparsed_payload { + const char *orig_description; + char *description; + union key_payload payload; + const void *data; + size_t datalen; + size_t quotalen; + time64_t expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void *raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op : 8; +}; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block __attribute__((btf_type_tag("rcu"))) * head; +}; + +struct taint_flag { + char c_true; + char c_false; + bool module; +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +enum error_detector { + ERROR_DETECTOR_KFENCE = 0, + ERROR_DETECTOR_KASAN = 1, + ERROR_DETECTOR_WARN = 2, +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, +}; + +enum ctx_state { + CONTEXT_DISABLED = -1, + CONTEXT_KERNEL = 0, + CONTEXT_IDLE = 1, + CONTEXT_USER = 2, + CONTEXT_GUEST = 3, + CONTEXT_MAX = 4, +}; + +struct warn_args { + const char *fmt; + va_list args; +}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + 
+typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, + int (*)(unsigned int, struct hlist_node *), + struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int result; + atomic_t ap_sync_state; + struct completion done_up; + struct completion done_down; +}; + +struct smp_hotplug_thread { + struct task_struct *__attribute__((btf_type_tag("percpu"))) * store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +enum cpuhp_sync_state { + SYNC_STATE_DEAD = 0, + SYNC_STATE_KICKED = 1, + SYNC_STATE_SHOULD_DIE = 2, + SYNC_STATE_ALIVE = 3, + SYNC_STATE_SHOULD_ONLINE = 4, + SYNC_STATE_ONLINE = 5, +}; + +enum hk_type { + HK_TYPE_TIMER = 0, + HK_TYPE_RCU = 1, + HK_TYPE_MISC = 2, + HK_TYPE_SCHED = 3, + HK_TYPE_TICK = 4, + HK_TYPE_DOMAIN = 5, + HK_TYPE_WQ = 6, + HK_TYPE_MANAGED_IRQ = 7, + HK_TYPE_KTHREAD = 8, + HK_TYPE_MAX = 9, +}; + +struct trace_event_raw_cpuhp_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct cpu_down_work { + unsigned int cpu; + enum cpuhp_state target; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +struct pipe_buffer; + +struct watch_queue; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + bool poll_usage; + bool note_loss; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; + struct watch_queue *watch_queue; +}; + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + unsigned long private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct 
pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +struct waitid_info; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct compat_rusage { + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +struct tasklet_struct; + +typedef void (*btf_trace_tasklet_entry)(void *, struct tasklet_struct *, void *); + +struct tasklet_struct { + struct tasklet_struct *next; + unsigned long state; + atomic_t count; + bool use_callback; + union { + void (*func)(unsigned long); + void (*callback)(struct tasklet_struct *); + }; + unsigned long data; +}; + +typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); + +struct softirq_action { + void (*action)(struct softirq_action *); +}; + +struct tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +struct trace_print_flags { + unsigned long mask; + const char *name; +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char __data[0]; +}; + +struct trace_event_raw_tasklet { + struct trace_entry ent; + void *tasklet; + void *func; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; +}; + +struct wait_bit_key { + void 
*flags; + int bit_nr; + unsigned long timeout; +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +struct trace_event_data_offsets_tasklet {}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, + resource_size_t); + void *alignf_data; +}; + +typedef void (*dr_release_t)(struct device *, void *); + +typedef int (*dr_match_t)(struct device *, void *, void *); + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler *const *xattr; + const struct dentry_operations *dops; + unsigned long magic; +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = -1, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +typedef __kernel_clock_t clock_t; + +struct __user_cap_header_struct; + +typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + +struct __user_cap_data_struct; + +typedef struct __user_cap_data_struct __attribute__((btf_type_tag("user"))) * cap_user_data_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct ucounts *ucounts; +}; + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + } seccomp; + }; +}; + +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +struct core_vma_metadata; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct file *file; + unsigned long limit; + unsigned long mm_flags; + int cpu; + loff_t written; + loff_t pos; + loff_t to_skip; + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; +}; + +struct core_vma_metadata { + unsigned long start; + unsigned long end; + unsigned long flags; + unsigned long dump_size; + unsigned long pgoff; + struct file *file; +}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, + struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, + struct k_sigaction *); + +enum sig_handler { + HANDLER_CURRENT = 0, + HANDLER_SIG_DFL = 1, + HANDLER_EXIT = 2, +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + 
TRACE_SIGNAL_LOSE_INFO = 4, +}; + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_TRAPNO = 4, + SIL_FAULT_MCEERR = 5, + SIL_FAULT_BNDERR = 6, + SIL_FAULT_PKUERR = 7, + SIL_FAULT_PERF_EVENT = 8, + SIL_CHLD = 9, + SIL_RT = 10, + SIL_SYS = 11, +}; + +struct trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + int sig; + int errno; + int code; + unsigned long sa_handler; + unsigned long sa_flags; + char __data[0]; +}; + +typedef unsigned long old_sigset_t; + +typedef u32 compat_old_sigset_t; + +struct compat_sigaction { + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; + compat_sigset_t sa_mask; +}; + +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; + +struct fd { + struct file *file; + unsigned int flags; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct trace_event_data_offsets_signal_deliver {}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +enum uts_proc { + UTS_PROC_ARCH = 0, + UTS_PROC_OSTYPE = 1, + UTS_PROC_OSRELEASE = 2, + UTS_PROC_VERSION = 3, + UTS_PROC_HOSTNAME = 4, + UTS_PROC_DOMAINNAME = 5, +}; + +enum pkc_action { + PKSM_ENABLE = 0, + PKSM_DISABLE = 1, + PKSM_STATUS = 2, +}; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + +struct old_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; +}; + +struct oldold_utsname { + char sysname[9]; + char nodename[9]; + char release[9]; + char version[9]; + char machine[9]; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +struct getcpu_cache { + unsigned long blob[16]; +}; + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[0]; +}; + +struct compat_sysinfo { + s32 uptime; + u32 loads[3]; + u32 totalram; + u32 freeram; + u32 sharedram; + u32 bufferram; + u32 totalswap; + u32 freeswap; + u16 procs; + u16 pad; + u32 totalhigh; + u32 freehigh; + u32 mem_unit; + char _f[8]; +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + 
__u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker *rescuer; + int nr_drainers; + int saved_max_active; + struct workqueue_attrs *unbound_attrs; + struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[24]; + struct callback_head rcu; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned int flags; + struct pool_workqueue __attribute__((btf_type_tag("percpu"))) + __attribute__((btf_type_tag("rcu"))) * + *cpu_pwq; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct wq_flusher { + struct list_head list; + int flush_color; + struct completion done; +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + u64 current_at; + unsigned int current_color; + int sleeping; + work_func_t last_func; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + unsigned long last_active; + unsigned int flags; + int id; + char desc[24]; + struct workqueue_struct *rescue_wq; +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[16]; + int nr_active; + int max_active; + struct list_head inactive_works; + struct list_head pwqs_node; + struct list_head mayday_node; + u64 stats[8]; + struct kthread_work release_work; + struct callback_head rcu; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct ida { + struct xarray xa; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + unsigned long watchdog_ts; + bool cpu_stall; + int nr_running; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct work_struct idle_cull_work; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct list_head dying_workers; + struct completion *detach_completion; + struct ida worker_ida; + struct workqueue_attrs *attrs; + 
struct hlist_node hash_node; + int refcnt; + struct callback_head rcu; +}; + +enum wq_affn_scope { + WQ_AFFN_DFL = 0, + WQ_AFFN_CPU = 1, + WQ_AFFN_SMT = 2, + WQ_AFFN_CACHE = 3, + WQ_AFFN_NUMA = 4, + WQ_AFFN_SYSTEM = 5, + WQ_AFFN_NR_TYPES = 6, +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + cpumask_var_t __pod_cpumask; + bool affn_strict; + enum wq_affn_scope affn_scope; + bool ordered; +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct wq_device { + struct workqueue_struct *wq; + struct device dev; +}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, int, struct pool_workqueue *, + struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_pod_type { + int nr_pods; + cpumask_var_t *pod_cpus; + int *pod_node; + int *cpu_pod; +}; + +struct wci_ent { + work_func_t func; + atomic64_t cnt; + struct hlist_node hash_node; +}; + +enum { + POOL_MANAGER_ACTIVE = 1, + POOL_DISASSOCIATED = 4, + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 300000, + MAYDAY_INITIAL_TIMEOUT = 10, + MAYDAY_INTERVAL = 100, + CREATE_COOLDOWN = 1000, + RESCUER_NICE_LEVEL = -20, + HIGHPRI_NICE_LEVEL = -20, + WQ_NAME_LEN = 24, +}; + +enum pool_workqueue_stats { + PWQ_STAT_STARTED = 0, + PWQ_STAT_COMPLETED = 1, + PWQ_STAT_CPU_TIME = 2, + PWQ_STAT_CPU_INTENSIVE = 3, + PWQ_STAT_CM_WAKEUP = 4, + PWQ_STAT_REPATRIATED = 5, + PWQ_STAT_MAYDAY = 6, + PWQ_STAT_RESCUED = 7, + PWQ_NR_STATS = 8, +}; + +enum { + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DESTROYING = 32768, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_ORDERED_EXPLICIT = 524288, + WQ_MAX_ACTIVE = 512, + WQ_UNBOUND_MAX_ACTIVE = 512, + WQ_DFL_ACTIVE = 256, +}; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + void *function; + u32 __data_loc_workqueue; + int req_cpu; + int cpu; + char __data[0]; +}; + +struct trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct cwt_wait { + wait_queue_entry_t wait; + struct work_struct *work; +}; + +struct __una_u32 { + u32 x; +}; + +struct work_for_cpu { + struct work_struct work; + long (*fn)(void *); + void *arg; + long ret; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + 
struct pool_workqueue *pwq_tbl[0]; +}; + +typedef unsigned int xa_mark_t; + +struct trace_event_data_offsets_workqueue_queue_work { + u32 workqueue; +}; + +struct pr_cont_work_struct { + bool comma; + work_func_t func; + long ctr; +}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct trace_event_data_offsets_workqueue_execute_end {}; + +struct execute_work { + struct work_struct work; +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute attrs[0]; +}; + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +struct sched_param { + int sched_priority; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; + +enum { + KTW_FREEZABLE = 1, +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +struct kthread_create_info { + char *full_name; + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +struct kthread { + unsigned long flags; + unsigned int cpu; + int result; + int (*threadfn)(void *); + void *data; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; + char *full_name; +}; + +struct ipc_ids { + int in_use; + unsigned short seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + int next_id; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + struct percpu_counter percpu_msg_bytes; + struct percpu_counter percpu_msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + unsigned long shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + unsigned int mq_queues_count; + unsigned int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + struct ctl_table_set mq_set; + struct ctl_table_header *mq_sysctls; + struct ctl_table_set ipc_set; + struct ctl_table_header *ipc_sysctls; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +typedef void (*btf_trace_notifier_register)(void *, void *); + +typedef void (*btf_trace_notifier_unregister)(void *, void *); + +typedef void (*btf_trace_notifier_run)(void *, void *); + +struct trace_event_raw_notifier_info { + struct trace_entry ent; + void *cb; + char __data[0]; +}; + +struct trace_event_data_offsets_notifier_info {}; + +struct srcu_notifier_head { + struct 
mutex mutex; + struct srcu_usage srcuu; + struct srcu_struct srcu; + struct notifier_block __attribute__((btf_type_tag("rcu"))) * head; +}; + +enum proc_cn_event { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_NONZERO_EXIT = 536870912, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +enum sys_off_mode { + SYS_OFF_MODE_POWER_OFF_PREPARE = 0, + SYS_OFF_MODE_POWER_OFF = 1, + SYS_OFF_MODE_RESTART_PREPARE = 2, + SYS_OFF_MODE_RESTART = 3, +}; + +struct sys_off_data; + +struct sys_off_handler { + struct notifier_block nb; + int (*sys_off_cb)(struct sys_off_data *); + void *cb_data; + enum sys_off_mode mode; + bool blocking; + void *list; + struct device *dev; +}; + +struct sys_off_data { + int mode; + void *cb_data; + const char *cmd; + struct device *dev; +}; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +enum vhost_task_flags { + VHOST_TASK_FLAGS_STOP = 0, +}; + +struct vhost_task { + bool (*fn)(void *); + void *data; + struct completion exited; + unsigned long flags; + struct task_struct *task; +}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, + struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, + kthread_work_func_t); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, + struct task_struct *, unsigned int); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, + struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, + struct linux_binprm *); + +typedef void (*btf_trace_sched_stat_wait)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_sleep)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_iowait)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_blocked)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64, u64); + +typedef void 
(*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, + struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, + struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +struct cfs_rq; + +typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); + +struct rq; + +typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); + +struct sched_queue { + unsigned long bitmap[1]; + struct list_head heads[65]; +}; + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + unsigned long caller; + void *arg; + struct cpu_stop_done *done; +}; + +struct balance_callback; + +struct cpuidle_state; + +struct rq { + raw_spinlock_t lock; + struct task_struct __attribute__((btf_type_tag("rcu"))) * curr; + struct task_struct *idle; + struct task_struct *stop; + struct task_struct *skip; + struct mm_struct *prev_mm; + struct sched_queue queue; + unsigned long prio; + u64 nr_switches; + atomic_t nr_iowait; + u64 last_seen_need_resched_ns; + int ticks_without_resched; + int membarrier_state; + int cpu; + bool online; + unsigned int ttwu_pending; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + int active_balance; + struct cpu_stop_work active_balance_work; + struct balance_callback *balance_callback; + struct rcuwait hotplug_wait; + unsigned int nr_pinned; + s32 load_history; + u64 load_block; + u64 load_stamp; + unsigned long calc_load_update; + long calc_load_active; + long : 64; + long : 64; + u64 clock; + u64 clock_task; + u64 last_ts_switch; + unsigned int nr_running; + unsigned long nr_uninterruptible; + long : 64; + long : 64; + long : 64; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + ktime_t hrtick_time; + struct sched_info rq_sched_info; + unsigned long long rq_cpu_time; + unsigned int yld_count; + unsigned int sched_switch; + unsigned int sched_count; + unsigned int sched_goidle; + unsigned int ttwu_count; + unsigned int ttwu_local; + struct cpuidle_state *idle_state; + long : 64; + long : 64; + call_single_data_t nohz_csd; + atomic_t nohz_flags; + cpumask_var_t scratch_mask; + long : 64; + long : 64; +}; + +struct balance_callback { + struct balance_callback *next; + void (*func)(struct rq *); +}; + +typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_thermal_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); + +struct sched_entity; + +typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); + +struct load_weight { + unsigned long weight; + u32 inv_weight; +}; + +struct util_est { + unsigned int enqueued; + unsigned int ewma; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long runnable_avg; + unsigned long util_avg; + struct util_est util_est; +}; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + u64 deadline; + u64 min_deadline; + struct list_head group_node; + unsigned int on_rq; + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + u64 nr_migrations; + int depth; 
+ struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + unsigned long runnable_weight; + long : 64; + long : 64; + struct sched_avg avg; +}; + +typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); + +struct root_domain; + +typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); + +typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); + +typedef void (*btf_trace_sched_compute_energy_tp)(void *, struct task_struct *, int, + unsigned long, unsigned long, unsigned long); + +typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); + +typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, unsigned long, void *); + +typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, unsigned long, + void *); + +typedef void (*btf_trace_ipi_entry)(void *, const char *); + +typedef void (*btf_trace_ipi_exit)(void *, const char *); + +struct kernel_stat { + unsigned long irqs_sum; + unsigned int softirqs[10]; +}; + +struct kernel_cpustat { + u64 cpustat[10]; +}; + +struct tick_work { + int cpu; + atomic_t state; + struct delayed_work work; +}; + +enum { + preempt_dynamic_undefined = -1, + preempt_dynamic_none = 0, + preempt_dynamic_voluntary = 1, + preempt_dynamic_full = 2, +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + net_prio_cgrp_id = 9, + hugetlb_cgrp_id = 10, + pids_cgrp_id = 11, + CGROUP_SUBSYS_COUNT = 12, +}; + +union cpumask_rcuhead { + cpumask_t cpumask; + struct callback_head rcu; +}; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_queue_work { + struct trace_entry ent; + void *work; + void *function; + void *worker; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + 
struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 delay; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 runtime; + u64 vruntime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_raw_ipi_raise { + struct trace_entry ent; + u32 __data_loc_target_cpus; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpumask { + struct trace_entry ent; + u32 __data_loc_cpumask; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_handler { + struct trace_entry ent; + const char *reason; + char __data[0]; +}; + +struct update_util_data { + void (*func)(struct update_util_data *, u64, unsigned int); +}; + +struct preempt_ops; + +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *, int); + void (*sched_out)(struct preempt_notifier *, struct task_struct *); +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; + __u32 sched_util_min; + __u32 sched_util_max; +}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; +}; + +struct trace_event_data_offsets_ipi_raise { + u32 target_cpus; +}; + 
+struct trace_event_data_offsets_ipi_send_cpumask { + u32 cpumask; +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_irq_t; + +typedef struct { + void *lock; +} class_preempt_t; + +struct affinity_context { + const struct cpumask *new_mask; + struct cpumask *user_mask; + unsigned int flags; +}; + +struct rq_flags { + unsigned long flags; +}; + +typedef struct { + void *lock; +} class_rcu_t; + +typedef struct { + raw_spinlock_t *lock; + unsigned long flags; +} class_raw_spinlock_irqsave_t; + +typedef struct task_struct *class_find_get_task_t; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irqsave_t; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; +}; + +typedef struct { + void *lock; + unsigned long flags; +} class_irqsave_t; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_kthread_work_queue_work {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_start {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_end {}; + +struct trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_stat_template {}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +struct trace_event_data_offsets_ipi_send_cpu {}; + +struct trace_event_data_offsets_ipi_handler {}; + +typedef int (*task_call_f)(struct task_struct *, void *); + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + NR_STATS = 10, +}; + +enum s2idle_states { + S2IDLE_STATE_NONE = 0, + S2IDLE_STATE_ENTER = 1, + S2IDLE_STATE_WAKE = 2, +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + char desc[32]; + s64 exit_latency_ns; + s64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + int (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); +}; + +struct cpuidle_state_usage { + unsigned long long disable; + unsigned long long usage; + u64 time_ns; + unsigned long long above; + unsigned long long below; + unsigned long long rejected; + unsigned long long s2idle_usage; + unsigned long long s2idle_time; +}; + +struct cpuidle_driver_kobj; + +struct cpuidle_state_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered : 1; + unsigned int enabled : 1; + unsigned int poll_time_limit : 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + u64 
last_residency_ns; + u64 poll_limit_ns; + u64 forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer : 1; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +struct idle_timer { + struct hrtimer timer; + int done; +}; + +struct housekeeping { + cpumask_var_t cpumasks[9]; + unsigned long flags; +}; + +struct sched_clock_data { + u64 tick_raw; + u64 tick_gtod; + u64 clock; +}; + +struct cpuacct { + struct cgroup_subsys_state css; + u64 __attribute__((btf_type_tag("percpu"))) * cpuusage; + struct kernel_cpustat __attribute__((btf_type_tag("percpu"))) * cpustat; +}; + +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +struct sugov_tunables { + struct gov_attr_set attr_set; + unsigned int rate_limit_us; +}; + +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *, char *); + ssize_t (*store)(struct gov_attr_set *, const char *, size_t); +}; + +struct sugov_policy; + +struct sugov_cpu { + struct update_util_data update_util; + struct sugov_policy *sg_policy; + unsigned int cpu; + bool iowait_boost_pending; + unsigned int iowait_boost; + u64 last_update; + unsigned long util; + unsigned long bw_dl; + unsigned long saved_idle_calls; +}; + +struct sugov_policy { + struct cpufreq_policy *policy; + struct sugov_tunables *tunables; + struct list_head tunables_hook; + raw_spinlock_t update_lock; + u64 last_freq_update_time; + s64 freq_update_delay_ns; + unsigned int next_freq; + unsigned int cached_raw_freq; + struct irq_work irq_work; + struct kthread_work work; + struct mutex work_lock; + struct kthread_worker worker; + struct task_struct *thread; + bool work_in_progress; + bool limits_changed; + bool need_freq_update; +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_GET_REGISTRATIONS = 512, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[101]; + int *cpu_to_pri; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + char *type; + unsigned long max_state; + 
struct device device; + struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, unsigned long *); + int (*get_cur_state)(struct thermal_cooling_device *, unsigned long *); + int (*set_cur_state)(struct thermal_cooling_device *, unsigned long); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *); +}; + +typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); + +typedef void (*btf_trace_contention_end)(void *, void *, int); + +struct trace_event_raw_contention_begin { + struct trace_entry ent; + void *lock_addr; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_contention_end { + struct trace_entry ent; + void *lock_addr; + int ret; + char __data[0]; +}; + +struct ww_acquire_ctx; + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + unsigned long stamp; + unsigned int acquired; + unsigned short wounded; + unsigned short is_wait_die; +}; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct trace_event_data_offsets_contention_begin {}; + +struct trace_event_data_offsets_contention_end {}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + RWSEM_WAITING_FOR_READ = 1, +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + unsigned long timeout; + bool handoff_set; +}; + +struct optimistic_spin_node { + struct optimistic_spin_node *next; + struct optimistic_spin_node *prev; + int locked; + int cpu; +}; + +struct mcs_spinlock { + struct mcs_spinlock *next; + int locked; + int count; +}; + +struct qnode { + struct mcs_spinlock mcs; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + RT_MUTEX_FULL_CHAINWALK = 1, +}; + +struct rt_waiter_node { + struct rb_node entry; + int prio; + u64 deadline; +}; + +struct rt_mutex_base; + +struct rt_mutex_waiter { + struct rt_waiter_node tree; + struct rt_waiter_node pi_tree; + struct task_struct *task; + struct rt_mutex_base *lock; + unsigned int wake_state; + struct ww_acquire_ctx *ww_ctx; +}; + +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +struct rt_mutex { + struct rt_mutex_base rtmutex; +}; + +struct rt_wake_q_head { + struct wake_q_head head; + struct task_struct *rtlock_task; +}; + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + 
umode_t mode; +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +enum suspend_stat_step { + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE = 2, + SUSPEND_SUSPEND = 3, + SUSPEND_SUSPEND_LATE = 4, + SUSPEND_SUSPEND_NOIRQ = 5, + SUSPEND_RESUME_NOIRQ = 6, + SUSPEND_RESUME_EARLY = 7, + SUSPEND_RESUME = 8, +}; + +enum { + TEST_NONE = 0, + TEST_CORE = 1, + TEST_CPUS = 2, + TEST_PLATFORM = 3, + TEST_DEVICES = 4, + TEST_FREEZER = 5, + __TEST_AFTER_LAST = 6, +}; + +typedef int suspend_state_t; + +struct pm_vt_switch { + struct list_head head; + struct device *dev; + bool required; +}; + +struct platform_suspend_ops { + int (*valid)(suspend_state_t); + int (*begin)(suspend_state_t); + int (*prepare)(); + int (*prepare_late)(); + int (*enter)(suspend_state_t); + void (*wake)(); + void (*finish)(); + bool (*suspend_again)(); + void (*end)(); + void (*recover)(); +}; + +struct platform_s2idle_ops { + int (*begin)(); + int (*prepare)(); + int (*prepare_late)(); + void (*check)(); + bool (*wake)(); + void (*restore_early)(); + void (*restore)(); + void (*end)(); +}; + +struct platform_hibernation_ops { + int (*begin)(pm_message_t); + void (*end)(); + int (*pre_snapshot)(); + void (*finish)(); + int (*prepare)(); + int (*enter)(); + void (*leave)(); + int (*pre_restore)(); + void (*restore_cleanup)(); + void (*recover)(); +}; + +enum { + HIBERNATION_INVALID = 0, + HIBERNATION_PLATFORM = 1, + HIBERNATION_SHUTDOWN = 2, + HIBERNATION_REBOOT = 3, + HIBERNATION_SUSPEND = 4, + HIBERNATION_TEST_RESUME = 5, + __HIBERNATION_AFTER_LAST = 6, +}; + +struct linked_page; + +struct chain_allocator { + struct linked_page *chain; + unsigned int used_space; + gfp_t gfp_mask; + int safe_needed; +}; + +struct linked_page { + struct linked_page *next; + char data[4088]; +}; + +struct pbe { + void *address; + void *orig_address; + struct pbe *next; +}; + +struct mem_zone_bm_rtree; + +struct rtree_node; + +struct bm_position { + struct mem_zone_bm_rtree *zone; + struct rtree_node *node; + unsigned long node_pfn; + unsigned long cur_pfn; + int node_bit; +}; + +struct memory_bitmap { + struct list_head zones; + struct linked_page *p_list; + struct bm_position cur; +}; + +struct mem_zone_bm_rtree { + struct list_head list; + struct list_head nodes; + struct list_head leaves; + unsigned long start_pfn; + unsigned long end_pfn; + struct rtree_node *rtree; + int levels; + unsigned int blocks; +}; + +struct rtree_node { + struct list_head list; + unsigned long *data; +}; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_ZSPAGES = 9, + NR_FREE_CMA_PAGES = 10, + NR_UNACCEPTED = 11, + NR_VM_ZONE_STAT_ITEMS = 12, +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_ISOLATE = 4, + MIGRATE_TYPES = 5, +}; + +struct nosave_region { + struct list_head list; + unsigned long start_pfn; + unsigned long end_pfn; +}; + +struct swsusp_info { + struct new_utsname uts; + u32 version_code; + unsigned long num_physpages; + int cpus; + unsigned long image_pages; + unsigned long pages; + unsigned long size; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + 
long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long 
: 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct mem_extent { + struct list_head hook; + unsigned long start; + unsigned long end; +}; + +struct snapshot_handle { + unsigned int cur; + void *buffer; + int sync_read; +}; + +struct swsusp_header { + char reserved[4056]; + u32 hw_sig; + u32 crc32; + sector_t image; + unsigned int flags; + char orig_sig[10]; + char sig[10]; +}; + +enum req_op { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_APPEND = 13, + REQ_OP_ZONE_RESET = 15, + REQ_OP_ZONE_RESET_ALL = 17, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_POLLED = 22, + __REQ_ALLOC_CACHE = 23, + __REQ_SWAP = 24, + __REQ_DRV = 25, + __REQ_FS_PRIVATE = 26, + __REQ_NOUNMAP = 27, + __REQ_NR_BITS = 28, +}; + +enum { + BIO_PAGE_PINNED = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_QUIET = 3, + BIO_CHAIN = 4, + 
BIO_REFFED = 5, + BIO_BPS_THROTTLED = 6, + BIO_TRACE_COMPLETION = 7, + BIO_CGROUP_ACCT = 8, + BIO_QOS_THROTTLED = 9, + BIO_QOS_MERGED = 10, + BIO_REMAPPED = 11, + BIO_ZONE_WRITE_LOCKED = 12, + BIO_FLAG_LAST = 13, +}; + +struct swsusp_extent { + struct rb_node node; + unsigned long start; + unsigned long end; +}; + +struct swap_map_page { + sector_t entries[511]; + sector_t next_swap; +}; + +struct swap_map_page_list; + +struct swap_map_handle { + struct swap_map_page *cur; + struct swap_map_page_list *maps; + sector_t cur_swap; + sector_t first_sector; + unsigned int k; + unsigned long reqd_free_pages; + u32 crc32; +}; + +struct swap_map_page_list { + struct swap_map_page *map; + struct swap_map_page_list *next; +}; + +struct hib_bio_batch { + atomic_t count; + wait_queue_head_t wait; + blk_status_t error; + struct blk_plug plug; +}; + +struct cmp_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; + unsigned char wrk[16384]; +}; + +struct crc_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + unsigned int run_threads; + wait_queue_head_t go; + wait_queue_head_t done; + u32 *crc32; + size_t *unc_len[3]; + unsigned char *unc[3]; +}; + +struct dec_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; +}; + +struct snapshot_data { + struct snapshot_handle handle; + int swap; + int mode; + bool frozen; + bool ready; + bool platform_support; + bool free_bitmaps; + dev_t dev; +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct file_lock_operations; + +struct lock_manager_operations; + +struct file_lock { + struct file_lock *fl_blocker; + struct list_head fl_list; + struct hlist_node fl_link; + struct list_head fl_blocked_requests; + struct list_head fl_blocked_member; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + int fl_link_cpu; + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + struct fasync_struct *fl_fasync; + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + struct { + struct inode *inode; + } ceph; + } fl_u; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + void *lm_mod_owner; + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock *, int, struct list_head *); + void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); + bool (*lm_lock_expirable)(struct file_lock *); + void (*lm_expire_lock)(); +}; + +struct file_lock_context { + spinlock_t 
flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct wakelock { + char *name; + struct rb_node node; + struct wakeup_source *ws; + struct list_head lru; +}; + +struct sysrq_key_op { + void (*const handler)(u8); + const char *const help_msg; + const char *const action_msg; + const int enable_mask; +}; + +struct em_data_callback { + int (*active_power)(struct device *, unsigned long *, unsigned long *); + int (*get_cost)(struct device *, unsigned long, unsigned long *); +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct prb_desc; + +struct printk_info; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; + atomic_long_t last_finalized_id; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct prb_data_blk_lpos { + unsigned long begin; + unsigned long next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct prb_data_blk_lpos text_blk_lpos; +}; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags : 5; + u8 level : 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_buffers { + char outbuf[2048]; + char scratchbuf[1024]; +}; + +struct latched_seq { + seqcount_latch_t latch; + u64 val[2]; +}; + +struct console_cmdline { + char name[16]; + int index; + bool user_specified; + char *options; +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum printk_info_flags { + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason); + enum kmsg_dump_reason max_reason; + bool registered; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + unsigned long irqflags; + unsigned long id; + unsigned int text_space; +}; + +struct printk_message { + struct printk_buffers *pbufs; + unsigned int outbuf_len; + u64 seq; + unsigned long dropped; +}; + +struct devkmsg_user { + atomic64_t seq; + struct ratelimit_state rs; + struct mutex lock; + struct printk_buffers pbufs; +}; + +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio : 2; + unsigned int req_prio : 2; + unsigned int unsafe : 1; + unsigned int unsafe_takeover : 1; + unsigned int cpu : 24; + }; + }; +}; + +enum desc_state { + desc_miss = -1, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct prb_data_block { + unsigned long id; + char data[0]; +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + 
IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, + IRQS_SYSFS = 16384, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 0, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 1048576, + _IRQ_NO_DEBUG = 2097152, + _IRQF_MODIFY_MASK = 2096911, +}; + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, + IRQTF_READY = 4, +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +enum msi_domain_ids { + MSI_DEFAULT_DOMAIN = 0, + MSI_SECONDARY_DOMAIN = 1, + MSI_MAX_DEVICE_IRQDOMAINS = 2, +}; + +enum msi_desc_filter { + MSI_DESC_ALL = 0, + MSI_DESC_NOTASSOCIATED = 1, + MSI_DESC_ASSOCIATED = 2, +}; + +struct msi_domain_template { + char name[48]; + struct irq_chip chip; + struct msi_domain_ops ops; + struct msi_domain_info info; +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +struct msi_ctrl { + unsigned int domid; + unsigned int first; + unsigned int last; + unsigned int nirqs; +}; + +struct msi_map { + int index; + int virq; +}; + +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +typedef void (*btf_trace_irq_matrix_online)(void *, struct irq_matrix *); + +typedef void (*btf_trace_irq_matrix_offline)(void *, struct irq_matrix *); + +typedef void (*btf_trace_irq_matrix_reserve)(void *, struct irq_matrix *); + +typedef void (*btf_trace_irq_matrix_remove_reserved)(void *, struct irq_matrix *); + +typedef void (*btf_trace_irq_matrix_assign_system)(void *, int, struct irq_matrix *); + +typedef void (*btf_trace_irq_matrix_alloc_reserved)(void *, int, unsigned int, + struct irq_matrix *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_reserve_managed)(void *, int, unsigned int, + struct irq_matrix *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_remove_managed)(void *, int, unsigned int, + struct irq_matrix *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_alloc_managed)(void *, int, unsigned int, + struct irq_matrix *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_assign)(void *, int, unsigned int, + struct irq_matrix *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_alloc)(void *, int, unsigned int, struct irq_matrix *, + struct cpumap *); + +typedef void (*btf_trace_irq_matrix_free)(void *, int, unsigned int, struct irq_matrix *, + struct cpumap *); + +struct trace_event_raw_irq_matrix_global { + struct trace_entry ent; + unsigned int online_maps; + unsigned int global_available; + unsigned int global_reserved; + unsigned int total_allocated; + char __data[0]; +}; + +struct trace_event_raw_irq_matrix_global_update { + struct trace_entry ent; + int bit; + unsigned int online_maps; + unsigned int global_available; + unsigned int global_reserved; + 
unsigned int total_allocated; + char __data[0]; +}; + +struct trace_event_raw_irq_matrix_cpu { + struct trace_entry ent; + int bit; + unsigned int cpu; + bool online; + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int online_maps; + unsigned int global_available; + unsigned int global_reserved; + unsigned int total_allocated; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_matrix_global {}; + +struct trace_event_data_offsets_irq_matrix_global_update {}; + +struct trace_event_data_offsets_irq_matrix_cpu {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +typedef void (*btf_trace_rcu_grace_period)(void *, const char *, unsigned long, const char *); + +typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, unsigned long, + unsigned long, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, unsigned long, u8, + int, int, unsigned long); + +typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, unsigned long, + const char *); + +typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_nocb_wake)(void *, const char *, int, const char *); + +typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, unsigned long); + +typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, unsigned long, int); + +typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, unsigned long, + unsigned long, unsigned long, u8, + int, int, int); + +typedef void (*btf_trace_rcu_fqs)(void *, const char *, unsigned long, int, const char *); + +typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); + +typedef void (*btf_trace_rcu_dyntick)(void *, const char *, long, long, int); + +typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long); + +typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); + +typedef void (*btf_trace_rcu_kvfree_callback)(void *, const char *, + struct callback_head *, unsigned long, long); + +typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long, long); + +typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); + +typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, + struct callback_head *, unsigned long); + +typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, + unsigned long, void **); + +typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); + +typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, + unsigned long, unsigned long, unsigned long); + +typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, + unsigned long); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(struct list_head *); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +struct rcu_tasks_percpu; + +struct rcu_tasks { + struct rcuwait cbs_wait; + raw_spinlock_t cbs_gbl_lock; + struct mutex tasks_gp_mutex; + int gp_state; + int 
gp_sleep; + int init_fract; + unsigned long gp_jiffies; + unsigned long gp_start; + unsigned long tasks_gp_seq; + unsigned long n_ipis; + unsigned long n_ipis_fails; + struct task_struct *kthread_ptr; + unsigned long lazy_jiffies; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + struct rcu_tasks_percpu __attribute__((btf_type_tag("percpu"))) * rtpcpu; + int percpu_enqueue_shift; + int percpu_enqueue_lim; + int percpu_dequeue_lim; + unsigned long percpu_dequeue_gpseq; + struct mutex barrier_q_mutex; + atomic_t barrier_q_count; + struct completion barrier_q_completion; + unsigned long barrier_q_seq; + char *name; + char *kname; +}; + +struct rcu_tasks_percpu { + struct rcu_segcblist cblist; + raw_spinlock_t lock; + unsigned long rtp_jiffies; + unsigned long rtp_n_lock_retries; + struct timer_list lazy_timer; + unsigned int urgent_gp; + struct work_struct rtp_work; + struct irq_work rtp_irq_work; + struct callback_head barrier_q_head; + struct list_head rtp_blkd_tasks; + int cpu; + struct rcu_tasks *rtpp; +}; + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_future_grace_period { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + long gp_seq_req; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period_init { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + u8 level; + int grplo; + int grphi; + unsigned long qsmask; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_grace_period { + struct trace_entry ent; + const char *rcuname; + long gpseq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_funnel_lock { + struct trace_entry ent; + const char *rcuname; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_nocb_wake { + struct trace_entry ent; + const char *rcuname; + int cpu; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_rcu_preempt_task { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_unlock_preempted_task { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_quiescent_state_report { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + unsigned long mask; + unsigned long qsmask; + u8 level; + int grplo; + int grphi; + u8 gp_tasks; + char __data[0]; +}; + +struct trace_event_raw_rcu_fqs { + struct trace_entry ent; + const char *rcuname; + long gp_seq; + int cpu; + const char *qsevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_stall_warning { + struct trace_entry ent; + const char *rcuname; + const char *msg; + char __data[0]; +}; + +struct trace_event_raw_rcu_dyntick { + struct trace_entry ent; + const char *polarity; + long oldnesting; + long newnesting; + int dynticks; + char __data[0]; +}; + +struct trace_event_raw_rcu_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + 
long qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_segcb_stats { + struct trace_entry ent; + const char *ctx; + unsigned long gp_seq[4]; + long seglen[4]; + char __data[0]; +}; + +struct trace_event_raw_rcu_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + unsigned long offset; + long qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_start { + struct trace_entry ent; + const char *rcuname; + long qlen; + long blimit; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + unsigned long offset; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kfree_bulk_callback { + struct trace_entry ent; + const char *rcuname; + unsigned long nr_records; + void **p; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_end { + struct trace_entry ent; + const char *rcuname; + int callbacks_invoked; + char cb; + char nr; + char iit; + char risk; + char __data[0]; +}; + +struct trace_event_raw_rcu_torture_read { + struct trace_entry ent; + char rcutorturename[8]; + struct callback_head *rhp; + unsigned long secs; + unsigned long c_old; + unsigned long c; + char __data[0]; +}; + +struct trace_event_raw_rcu_barrier { + struct trace_entry ent; + const char *rcuname; + const char *s; + int cpu; + int cnt; + unsigned long done; + char __data[0]; +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long len; +}; + +struct trc_stall_chk_rdr { + int nesting; + int ipi_to_cpu; + u8 needqs; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +struct trace_event_data_offsets_rcu_grace_period {}; + +struct trace_event_data_offsets_rcu_future_grace_period {}; + +struct trace_event_data_offsets_rcu_grace_period_init {}; + +struct trace_event_data_offsets_rcu_exp_grace_period {}; + +struct trace_event_data_offsets_rcu_exp_funnel_lock {}; + +struct trace_event_data_offsets_rcu_nocb_wake {}; + +struct trace_event_data_offsets_rcu_preempt_task {}; + +struct trace_event_data_offsets_rcu_unlock_preempted_task {}; + +struct trace_event_data_offsets_rcu_quiescent_state_report {}; + +struct trace_event_data_offsets_rcu_fqs {}; + +struct trace_event_data_offsets_rcu_stall_warning {}; + +struct trace_event_data_offsets_rcu_dyntick {}; + +struct trace_event_data_offsets_rcu_callback {}; + +struct trace_event_data_offsets_rcu_segcb_stats {}; + +struct trace_event_data_offsets_rcu_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_batch_start {}; + +struct trace_event_data_offsets_rcu_invoke_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; + +struct trace_event_data_offsets_rcu_batch_end {}; + +struct trace_event_data_offsets_rcu_torture_read {}; + +struct trace_event_data_offsets_rcu_barrier {}; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +typedef unsigned long ulong; + +enum rcutorture_type { + RCU_FLAVOR = 0, + RCU_TASKS_FLAVOR = 1, + RCU_TASKS_RUDE_FLAVOR = 2, + RCU_TASKS_TRACING_FLAVOR = 3, + RCU_TRIVIAL_FLAVOR = 4, + SRCU_FLAVOR = 5, + INVALID_RCU_FLAVOR = 6, +}; + +struct rcu_exp_work { + unsigned long rew_s; + struct work_struct rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + unsigned long gp_seq; + unsigned long 
gp_seq_needed; + unsigned long completedqs; + unsigned long qsmask; + unsigned long rcu_gp_init_mask; + unsigned long qsmaskinit; + unsigned long qsmaskinitnext; + unsigned long expmask; + unsigned long expmaskinit; + unsigned long expmaskinitnext; + unsigned long cbovldmask; + unsigned long ffmask; + unsigned long grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + unsigned long boost_time; + struct mutex boost_kthread_mutex; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + unsigned long n_boosts; + struct swait_queue_head nocb_gp_wq[2]; + raw_spinlock_t fqslock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + spinlock_t exp_lock; + unsigned long exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + raw_spinlock_t exp_poll_lock; + unsigned long exp_seq_poll_rq; + struct work_struct exp_poll_wq; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_snap_record { + unsigned long gp_seq; + u64 cputime_irq; + u64 cputime_softirq; + u64 cputime_system; + unsigned long nr_hardirqs; + unsigned int nr_softirqs; + unsigned long long nr_csw; + unsigned long jiffies; +}; + +struct rcu_data { + unsigned long gp_seq; + unsigned long gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool cpu_started; + struct rcu_node *mynode; + unsigned long grpmask; + unsigned long ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long qlen_last_fqs_check; + unsigned long n_cbs_invoked; + unsigned long n_force_qs_snap; + long blimit; + int dynticks_snap; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + unsigned long barrier_seq_snap; + struct callback_head barrier_head; + int exp_dynticks_snap; + struct swait_queue_head nocb_cb_wq; + struct swait_queue_head nocb_state_wq; + struct task_struct *nocb_gp_kthread; + raw_spinlock_t nocb_lock; + atomic_t nocb_lock_contended; + int nocb_defer_wakeup; + struct timer_list nocb_timer; + unsigned long nocb_gp_adv_time; + struct mutex nocb_gp_kthread_mutex; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + raw_spinlock_t nocb_bypass_lock; + struct rcu_cblist nocb_bypass; + unsigned long nocb_bypass_first; + unsigned long nocb_nobypass_last; + int nocb_nobypass_count; + long : 64; + raw_spinlock_t nocb_gp_lock; + u8 nocb_gp_sleep; + u8 nocb_gp_bypass; + u8 nocb_gp_gp; + unsigned long nocb_gp_seq; + unsigned long nocb_gp_loops; + struct swait_queue_head nocb_gp_wq; + bool nocb_cb_sleep; + struct task_struct *nocb_cb_kthread; + struct list_head nocb_head_rdp; + struct list_head nocb_entry_rdp; + struct rcu_data *nocb_toggling_rdp; + long : 64; + long : 64; + long : 64; + struct rcu_data *nocb_gp_rdp; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + unsigned long rcuc_activity; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + unsigned long rcu_iw_gp_seq; + unsigned long rcu_ofl_gp_seq; + short rcu_ofl_gp_flags; + 
unsigned long rcu_onl_gp_seq; + short rcu_onl_gp_flags; + unsigned long last_fqs_resched; + unsigned long last_sched_clock; + struct rcu_snap_record snap_record; + long lazy_len; + int cpu; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct rcu_state { + struct rcu_node node[5]; + struct rcu_node *level[3]; + int ncpus; + int n_online_cpus; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned long gp_seq; + unsigned long gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short gp_flags; + short gp_state; + unsigned long gp_wake_time; + unsigned long gp_wake_seq; + unsigned long gp_seq_polled; + unsigned long gp_seq_polled_snap; + unsigned long gp_seq_polled_exp_snap; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + unsigned long barrier_sequence; + raw_spinlock_t barrier_lock; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + unsigned long expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + unsigned long jiffies_force_qs; + unsigned long jiffies_kick_kthreads; + unsigned long n_force_qs; + unsigned long gp_start; + unsigned long gp_end; + unsigned long gp_activity; + unsigned long gp_req_activity; + unsigned long jiffies_stall; + int nr_fqs_jiffies_stall; + unsigned long jiffies_resched; + unsigned long n_force_qs_gpstart; + const char *name; + char abbr; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + arch_spinlock_t ofl_lock; + int nocb_is_setup; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct rcu_gp_oldstate { + unsigned long rgos_norm; + unsigned long rgos_exp; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct rcu_gp_oldstate head_free_gp_snap; + struct list_head bulk_head_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + unsigned long head_gp_snap; + atomic_t head_count; + struct list_head bulk_head[2]; + atomic_t bulk_count[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct delayed_work monitor_work; + bool initialized; + struct delayed_work page_cache_work; + atomic_t backoff_page_cache_fill; + atomic_t work_in_progress; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; +}; + +struct context_tracking { + bool active; + int recursion; + atomic_t state; + long dynticks_nesting; + long dynticks_nmi_nesting; +}; + +struct kvfree_rcu_bulk_data { + struct list_head list; + struct rcu_gp_oldstate gp_snap; + unsigned long nr_records; + void *records[0]; +}; + +struct rcu_stall_chk_rdr { + int nesting; + union rcu_special rs; + bool on_blkd_list; +}; + +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + unsigned long attrs; +}; + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED = 1, + PCI_P2PDMA_MAP_BUS_ADDR = 2, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +struct io_tlb_area { + unsigned long used; + unsigned int index; + spinlock_t lock; +}; + +struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; +}; + +typedef void 
(*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t); + +struct trace_event_raw_swiotlb_bounced { + struct trace_entry ent; + u32 __data_loc_dev_name; + u64 dma_mask; + dma_addr_t dev_addr; + size_t size; + bool force; + char __data[0]; +}; + +struct trace_event_data_offsets_swiotlb_bounced { + u32 dev_name; +}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long); + +enum { + TRACE_EVENT_FL_FILTERED = 1, + TRACE_EVENT_FL_CAP_ANY = 2, + TRACE_EVENT_FL_NO_SET_FILTER = 4, + TRACE_EVENT_FL_IGNORE_ENABLE = 8, + TRACE_EVENT_FL_TRACEPOINT = 16, + TRACE_EVENT_FL_DYNAMIC = 32, + TRACE_EVENT_FL_KPROBE = 64, + TRACE_EVENT_FL_UPROBE = 128, + TRACE_EVENT_FL_EPROBE = 256, + TRACE_EVENT_FL_FPROBE = 512, + TRACE_EVENT_FL_CUSTOM = 1024, +}; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long id; + unsigned long args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long id; + long ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +struct ptrace_sud_config { + __u64 mode; + __u64 selector; + __u64 offset; + __u64 len; +}; + +enum kvm_apic_logical_mode { + KVM_APIC_MODE_SW_DISABLED = 0, + KVM_APIC_MODE_XAPIC_CLUSTER = 1, + KVM_APIC_MODE_XAPIC_FLAT = 2, + KVM_APIC_MODE_X2APIC = 3, + KVM_APIC_MODE_MAP_DISABLED = 4, +}; + +enum hv_tsc_page_status { + HV_TSC_PAGE_UNSET = 0, + HV_TSC_PAGE_GUEST_CHANGED = 1, + HV_TSC_PAGE_HOST_CHANGED = 2, + HV_TSC_PAGE_SET = 3, + HV_TSC_PAGE_BROKEN = 4, +}; + +enum pfn_cache_usage { + KVM_GUEST_USES_PFN = 1, + KVM_HOST_USES_PFN = 2, + KVM_GUEST_AND_HOST_USE_PFN = 3, +}; + +enum kvm_irqchip_mode { + KVM_IRQCHIP_NONE = 0, + KVM_IRQCHIP_KERNEL = 1, + KVM_IRQCHIP_SPLIT = 2, +}; + +enum mmu_notifier_event { + MMU_NOTIFY_UNMAP = 0, + MMU_NOTIFY_CLEAR = 1, + MMU_NOTIFY_PROTECTION_VMA = 2, + MMU_NOTIFY_PROTECTION_PAGE = 3, + MMU_NOTIFY_SOFT_DIRTY = 4, + MMU_NOTIFY_RELEASE = 5, + MMU_NOTIFY_MIGRATE = 6, + MMU_NOTIFY_EXCLUSIVE = 7, +}; + +enum kvm_stat_kind { + KVM_STAT_VM = 0, + KVM_STAT_VCPU = 1, +}; + +enum pmc_type { + KVM_PMC_GP = 0, + KVM_PMC_FIXED = 1, +}; + +typedef u64 gpa_t; + +struct kvm_mmio_fragment { + gpa_t gpa; + void *data; + unsigned int len; +}; + +struct kvm_lapic; + +struct kvm_page_fault; + +struct x86_exception; + +struct kvm_mmu_page; + +typedef u64 hpa_t; + +struct kvm_mmu_root_info { + gpa_t pgd; + hpa_t hpa; +}; + +union kvm_mmu_page_role { + u32 word; + struct { + unsigned int level : 4; + unsigned int has_4_byte_gpte : 1; + unsigned int quadrant : 2; + unsigned int direct : 1; + unsigned int access : 3; + unsigned int invalid : 1; + unsigned int efer_nx : 1; + unsigned int cr0_wp : 1; + unsigned int smep_andnot_wp : 1; + unsigned int smap_andnot_wp : 1; + unsigned int ad_disabled : 1; + unsigned int guest_mode : 1; + unsigned int passthrough : 1; + char : 5; + unsigned int smm : 8; + }; +}; + +union kvm_mmu_extended_role { + u32 word; + struct { + unsigned int valid : 1; + unsigned int execonly : 1; + unsigned int cr4_pse : 1; + unsigned int cr4_pke : 1; + unsigned int cr4_smap : 1; + unsigned int cr4_smep : 1; + unsigned int cr4_la57 : 1; + unsigned int efer_lma : 1; + }; +}; + +union kvm_cpu_role { + u64 as_u64; + struct { + union kvm_mmu_page_role base; + union kvm_mmu_extended_role ext; + }; +}; + +struct rsvd_bits_validate { + u64 rsvd_bits_mask[10]; + u64 bad_mt_xwr; +}; + +struct kvm_vcpu; + +struct kvm_mmu { + 
unsigned long (*get_guest_pgd)(struct kvm_vcpu *); + u64 (*get_pdptr)(struct kvm_vcpu *, int); + int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *); + void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, + struct x86_exception *); + int (*sync_spte)(struct kvm_vcpu *, struct kvm_mmu_page *, int); + struct kvm_mmu_root_info root; + union kvm_cpu_role cpu_role; + union kvm_mmu_page_role root_role; + u32 pkru_mask; + struct kvm_mmu_root_info prev_roots[3]; + u8 permissions[16]; + u64 *pae_root; + u64 *pml4_root; + u64 *pml5_root; + struct rsvd_bits_validate shadow_zero_check; + struct rsvd_bits_validate guest_rsvd_check; + u64 pdptrs[4]; +}; + +struct kvm_mmu_memory_cache { + gfp_t gfp_zero; + gfp_t gfp_custom; + struct kmem_cache *kmem_cache; + int capacity; + int nobjs; + void **objects; +}; + +struct kvm_pio_request { + unsigned long linear_rip; + unsigned long count; + int in; + int port; + int size; +}; + +struct kvm_queued_exception { + bool pending; + bool injected; + bool has_error_code; + u8 vector; + u32 error_code; + unsigned long payload; + bool has_payload; +}; + +struct kvm_queued_interrupt { + bool injected; + bool soft; + u8 nr; +}; + +struct kvm_hypervisor_cpuid { + u32 base; + u32 limit; +}; + +struct x86_emulate_ctxt; + +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +}; + +typedef u64 hfn_t; + +typedef hfn_t kvm_pfn_t; + +struct kvm_memory_slot; + +struct kvm; + +struct gfn_to_pfn_cache { + u64 generation; + gpa_t gpa; + unsigned long uhva; + struct kvm_memory_slot *memslot; + struct kvm *kvm; + struct kvm_vcpu *vcpu; + struct list_head list; + rwlock_t lock; + struct mutex refresh_lock; + void *khva; + kvm_pfn_t pfn; + enum pfn_cache_usage usage; + bool active; + bool valid; +}; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + unsigned long hva; + unsigned long len; + struct kvm_memory_slot *memslot; +}; + +struct kvm_mtrr_range { + u64 base; + u64 mask; + struct list_head node; +}; + +struct kvm_mtrr { + struct kvm_mtrr_range var_ranges[8]; + mtrr_type fixed_ranges[88]; + u64 deftype; + struct list_head head; +}; + +typedef u64 gfn_t; + +struct kvm_pmc { + enum pmc_type type; + u8 idx; + bool is_paused; + bool intr; + u64 counter; + u64 prev_counter; + u64 eventsel; + struct perf_event *perf_event; + struct kvm_vcpu *vcpu; + u64 current_config; +}; + +struct kvm_pmu { + u8 version; + unsigned int nr_arch_gp_counters; + unsigned int nr_arch_fixed_counters; + unsigned int available_event_types; + u64 fixed_ctr_ctrl; + u64 fixed_ctr_ctrl_mask; + u64 global_ctrl; + u64 global_status; + u64 counter_bitmask[2]; + u64 global_ctrl_mask; + u64 global_status_mask; + u64 reserved_bits; + u64 raw_event_mask; + struct kvm_pmc gp_counters[8]; + struct kvm_pmc fixed_counters[3]; + union { + unsigned long reprogram_pmi[1]; + atomic64_t __reprogram_pmi; + }; + unsigned long all_valid_pmc_idx[1]; + unsigned long pmc_in_use[1]; + u64 ds_area; + u64 pebs_enable; + u64 pebs_enable_mask; + u64 pebs_data_cfg; + u64 pebs_data_cfg_mask; + u64 host_cross_mapped_mask; + bool need_cleanup; + u8 event_count; +}; + +struct kvm_cpuid_entry2; + +struct kvm_vcpu_hv; + +struct kvm_vcpu_arch { + unsigned long regs[17]; + u32 regs_avail; + u32 regs_dirty; + unsigned long cr0; + unsigned long cr0_guest_owned_bits; + unsigned long cr2; + unsigned long cr3; + unsigned long cr4; + 
unsigned long cr4_guest_owned_bits; + unsigned long cr4_guest_rsvd_bits; + unsigned long cr8; + u32 host_pkru; + u32 pkru; + u32 hflags; + u64 efer; + u64 apic_base; + struct kvm_lapic *apic; + bool load_eoi_exitmap_pending; + unsigned long ioapic_handled_vectors[4]; + unsigned long apic_attention; + int32_t apic_arb_prio; + int mp_state; + u64 ia32_misc_enable_msr; + u64 smbase; + u64 smi_count; + bool at_instruction_boundary; + bool tpr_access_reporting; + bool xfd_no_write_intercept; + u64 ia32_xss; + u64 microcode_version; + u64 arch_capabilities; + u64 perf_capabilities; + struct kvm_mmu *mmu; + struct kvm_mmu root_mmu; + struct kvm_mmu guest_mmu; + struct kvm_mmu nested_mmu; + struct kvm_mmu *walk_mmu; + struct kvm_mmu_memory_cache mmu_pte_list_desc_cache; + struct kvm_mmu_memory_cache mmu_shadow_page_cache; + struct kvm_mmu_memory_cache mmu_shadowed_info_cache; + struct kvm_mmu_memory_cache mmu_page_header_cache; + struct fpu_guest guest_fpu; + u64 xcr0; + u64 guest_supported_xcr0; + struct kvm_pio_request pio; + void *pio_data; + void *sev_pio_data; + unsigned int sev_pio_count; + u8 event_exit_inst_len; + bool exception_from_userspace; + struct kvm_queued_exception exception; + struct kvm_queued_exception exception_vmexit; + struct kvm_queued_interrupt interrupt; + int halt_request; + int cpuid_nent; + struct kvm_cpuid_entry2 *cpuid_entries; + struct kvm_hypervisor_cpuid kvm_cpuid; + struct { + unsigned long enabled[1]; + } governed_features; + u64 reserved_gpa_bits; + int maxphyaddr; + struct x86_emulate_ctxt *emulate_ctxt; + bool emulate_regs_need_sync_to_vcpu; + bool emulate_regs_need_sync_from_vcpu; + int (*complete_userspace_io)(struct kvm_vcpu *); + gpa_t time; + struct pvclock_vcpu_time_info hv_clock; + unsigned int hw_tsc_khz; + struct gfn_to_pfn_cache pv_time; + bool pvclock_set_guest_stopped_request; + struct { + u8 preempted; + u64 msr_val; + u64 last_steal; + struct gfn_to_hva_cache cache; + } st; + u64 l1_tsc_offset; + u64 tsc_offset; + u64 last_guest_tsc; + u64 last_host_tsc; + u64 tsc_offset_adjustment; + u64 this_tsc_nsec; + u64 this_tsc_write; + u64 this_tsc_generation; + bool tsc_catchup; + bool tsc_always_catchup; + s8 virtual_tsc_shift; + u32 virtual_tsc_mult; + u32 virtual_tsc_khz; + s64 ia32_tsc_adjust_msr; + u64 msr_ia32_power_ctl; + u64 l1_tsc_scaling_ratio; + u64 tsc_scaling_ratio; + atomic_t nmi_queued; + unsigned int nmi_pending; + bool nmi_injected; + bool smi_pending; + u8 handling_intr_from_guest; + struct kvm_mtrr mtrr_state; + u64 pat; + unsigned int switch_db_regs; + unsigned long db[4]; + unsigned long dr6; + unsigned long dr7; + unsigned long eff_db[4]; + unsigned long guest_debug_dr7; + u64 msr_platform_info; + u64 msr_misc_features_enables; + u64 mcg_cap; + u64 mcg_status; + u64 mcg_ctl; + u64 mcg_ext_ctl; + u64 *mce_banks; + u64 *mci_ctl2_banks; + u64 mmio_gva; + unsigned int mmio_access; + gfn_t mmio_gfn; + u64 mmio_gen; + struct kvm_pmu pmu; + unsigned long singlestep_rip; + bool hyperv_enabled; + struct kvm_vcpu_hv *hyperv; + cpumask_var_t wbinvd_dirty_mask; + unsigned long last_retry_eip; + unsigned long last_retry_addr; + struct { + bool halted; + gfn_t gfns[64]; + struct gfn_to_hva_cache data; + u64 msr_en_val; + u64 msr_int_val; + u16 vec; + u32 id; + bool send_user_only; + u32 host_apf_flags; + bool delivery_as_pf_vmexit; + bool pageready_pending; + } apf; + struct { + u64 length; + u64 status; + } osvw; + struct { + u64 msr_val; + struct gfn_to_hva_cache data; + } pv_eoi; + u64 msr_kvm_poll_control; + unsigned long exit_qualification; 
+ struct { + bool pv_unhalted; + } pv; + int pending_ioapic_eoi; + int pending_external_vector; + bool preempted_in_kernel; + bool l1tf_flush_l1d; + int last_vmentry_cpu; + u64 msr_hwcr; + struct { + u32 features; + bool enforce; + } pv_cpuid; + bool guest_state_protected; + bool pdptrs_from_userspace; +}; + +struct kvm_vcpu_stat_generic { + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_wait_ns; + u64 halt_poll_success_hist[32]; + u64 halt_poll_fail_hist[32]; + u64 halt_wait_hist[32]; + u64 blocking; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 pf_taken; + u64 pf_fixed; + u64 pf_emulate; + u64 pf_spurious; + u64 pf_fast; + u64 pf_mmio_spte_created; + u64 pf_guest; + u64 tlb_flush; + u64 invlpg; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 signal_exits; + u64 irq_window_exits; + u64 nmi_window_exits; + u64 l1d_flush; + u64 halt_exits; + u64 request_irq_exits; + u64 irq_exits; + u64 host_state_reload; + u64 fpu_reload; + u64 insn_emulation; + u64 insn_emulation_fail; + u64 hypercalls; + u64 irq_injections; + u64 nmi_injections; + u64 req_event; + u64 nested_run; + u64 directed_yield_attempted; + u64 directed_yield_successful; + u64 preemption_reported; + u64 preemption_other; + u64 guest_mode; + u64 notify_window_exits; +}; + +struct kvm_dirty_gfn; + +struct kvm_dirty_ring { + u32 dirty_index; + u32 reset_index; + u32 size; + u32 soft_limit; + struct kvm_dirty_gfn *dirty_gfns; + int index; +}; + +struct kvm_run; + +struct kvm_vcpu { + struct kvm *kvm; + struct preempt_notifier preempt_notifier; + int cpu; + int vcpu_id; + int vcpu_idx; + int ____srcu_idx; + int mode; + u64 requests; + unsigned long guest_debug; + struct mutex mutex; + struct kvm_run *run; + struct rcuwait wait; + struct pid __attribute__((btf_type_tag("rcu"))) * pid; + int sigset_active; + sigset_t sigset; + unsigned int halt_poll_ns; + bool valid_wakeup; + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_cur_fragment; + int mmio_nr_fragments; + struct kvm_mmio_fragment mmio_fragments[2]; + struct { + u32 queued; + struct list_head queue; + struct list_head done; + spinlock_t lock; + } async_pf; + struct { + bool in_spin_loop; + bool dy_eligible; + } spin_loop; + bool preempted; + bool ready; + struct kvm_vcpu_arch arch; + struct kvm_vcpu_stat stat; + char stats_id[48]; + struct kvm_dirty_ring dirty_ring; + struct kvm_memory_slot *last_used_slot; + u64 last_used_slot_gen; +}; + +struct kvm_memslots { + u64 generation; + atomic_long_t last_used_slot; + struct rb_root_cached hva_tree; + struct rb_root gfn_tree; + struct hlist_head id_hash[128]; + int node_idx; +}; + +struct kvm_vm_stat_generic { + u64 remote_tlb_flush; + u64 remote_tlb_flush_requests; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; + u64 mmu_shadow_zapped; + u64 mmu_pte_write; + u64 mmu_pde_zapped; + u64 mmu_flooded; + u64 mmu_recycled; + u64 mmu_cache_miss; + u64 mmu_unsync; + union { + struct { + atomic64_t pages_4k; + atomic64_t pages_2m; + atomic64_t pages_1g; + }; + atomic64_t pages[3]; + }; + u64 nx_lpage_splits; + u64 max_mmu_page_hash_collisions; + u64 max_mmu_rmap_size; +}; + +struct kvm_pic; + +struct kvm_ioapic; + +struct kvm_pit; + +struct kvm_xen_hvm_config { + __u32 flags; + __u32 msr; + __u64 blob_addr_32; + __u64 blob_addr_64; + __u8 blob_size_32; + __u8 blob_size_64; + __u8 pad2[30]; +}; + +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + 
u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; +}; + +struct kvm_hv_syndbg { + struct { + u64 control; + u64 status; + u64 send_page; + u64 recv_page; + u64 pending_page; + } control; + u64 options; +}; + +struct hv_partition_assist_pg; + +struct kvm_hv { + struct mutex hv_lock; + u64 hv_guest_os_id; + u64 hv_hypercall; + u64 hv_tsc_page; + enum hv_tsc_page_status hv_tsc_page_status; + u64 hv_crash_param[5]; + u64 hv_crash_ctl; + struct ms_hyperv_tsc_page tsc_ref; + struct idr conn_to_evt; + u64 hv_reenlightenment_control; + u64 hv_tsc_emulation_control; + u64 hv_tsc_emulation_status; + u64 hv_invtsc_control; + atomic_t num_mismatched_vp_indexes; + unsigned int synic_auto_eoi_used; + struct hv_partition_assist_pg *hv_pa_pg; + struct kvm_hv_syndbg hv_syndbg; +}; + +struct kvm_xen { + struct mutex xen_lock; + u32 xen_version; + bool long_mode; + bool runstate_update_flag; + u8 upcall_vector; + struct gfn_to_pfn_cache shinfo_cache; + struct idr evtchn_ports; + unsigned long poll_mask[16]; +}; + +struct kvm_apic_map; + +struct kvm_x86_msr_filter; + +struct kvm_x86_pmu_event_filter; + +struct kvm_arch { + unsigned long n_used_mmu_pages; + unsigned long n_requested_mmu_pages; + unsigned long n_max_mmu_pages; + unsigned int indirect_shadow_pages; + u8 mmu_valid_gen; + struct hlist_head mmu_page_hash[4096]; + struct list_head active_mmu_pages; + struct list_head zapped_obsolete_pages; + struct list_head possible_nx_huge_pages; + spinlock_t mmu_unsync_pages_lock; + struct iommu_domain *iommu_domain; + bool iommu_noncoherent; + atomic_t noncoherent_dma_count; + atomic_t assigned_device_count; + struct kvm_pic *vpic; + struct kvm_ioapic *vioapic; + struct kvm_pit *vpit; + atomic_t vapics_in_nmi_mode; + struct mutex apic_map_lock; + struct kvm_apic_map __attribute__((btf_type_tag("rcu"))) * apic_map; + atomic_t apic_map_dirty; + bool apic_access_memslot_enabled; + bool apic_access_memslot_inhibited; + struct rw_semaphore apicv_update_lock; + unsigned long apicv_inhibit_reasons; + gpa_t wall_clock; + bool mwait_in_guest; + bool hlt_in_guest; + bool pause_in_guest; + bool cstate_in_guest; + unsigned long irq_sources_bitmap; + s64 kvmclock_offset; + raw_spinlock_t tsc_write_lock; + u64 last_tsc_nsec; + u64 last_tsc_write; + u32 last_tsc_khz; + u64 last_tsc_offset; + u64 cur_tsc_nsec; + u64 cur_tsc_write; + u64 cur_tsc_offset; + u64 cur_tsc_generation; + int nr_vcpus_matched_tsc; + u32 default_tsc_khz; + bool user_set_tsc; + seqcount_raw_spinlock_t pvclock_sc; + bool use_master_clock; + u64 master_kernel_ns; + u64 master_cycle_now; + struct delayed_work kvmclock_update_work; + struct delayed_work kvmclock_sync_work; + struct kvm_xen_hvm_config xen_hvm_config; + struct hlist_head mask_notifier_list; + struct kvm_hv hyperv; + struct kvm_xen xen; + bool backwards_tsc_observed; + bool boot_vcpu_runs_old_kvmclock; + u32 bsp_vcpu_id; + u64 disabled_quirks; + enum kvm_irqchip_mode irqchip_mode; + u8 nr_reserved_ioapic_pins; + bool disabled_lapic_found; + bool x2apic_format; + bool x2apic_broadcast_quirk_disabled; + bool guest_can_read_msr_platform_info; + bool exception_payload_enabled; + bool triple_fault_event; + bool bus_lock_detection_enabled; + bool enable_pmu; + u32 notify_window; + u32 notify_vmexit_flags; + bool exit_on_emulation_error; + u32 user_space_msr_mask; + struct kvm_x86_msr_filter __attribute__((btf_type_tag("rcu"))) * msr_filter; + u32 hypercall_exit_enabled; + bool sgx_provisioning_allowed; + struct kvm_x86_pmu_event_filter __attribute__((btf_type_tag("rcu"))) * 
pmu_event_filter; + struct task_struct *nx_huge_page_recovery_thread; + atomic64_t tdp_mmu_pages; + struct list_head tdp_mmu_roots; + spinlock_t tdp_mmu_pages_lock; + bool shadow_root_allocated; + u32 max_vcpu_ids; + bool disable_nx_huge_pages; + struct kvm_mmu_memory_cache split_shadow_page_cache; + struct kvm_mmu_memory_cache split_page_header_cache; + struct kvm_mmu_memory_cache split_desc_cache; +}; + +struct mmu_notifier_ops; + +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct callback_head rcu; + unsigned int users; +}; + +struct kvm_io_bus; + +struct kvm_coalesced_mmio_ring; + +struct kvm_irq_routing_table; + +struct kvm_stat_data; + +struct kvm { + rwlock_t mmu_lock; + struct mutex slots_lock; + struct mutex slots_arch_lock; + struct mm_struct *mm; + unsigned long nr_memslot_pages; + struct kvm_memslots __memslots[4]; + struct kvm_memslots __attribute__((btf_type_tag("rcu"))) * memslots[2]; + struct xarray vcpu_array; + atomic_t nr_memslots_dirty_logging; + spinlock_t mn_invalidate_lock; + unsigned long mn_active_invalidate_count; + struct rcuwait mn_memslots_update_rcuwait; + spinlock_t gpc_lock; + struct list_head gpc_list; + atomic_t online_vcpus; + int max_vcpus; + int created_vcpus; + int last_boosted_vcpu; + struct list_head vm_list; + struct mutex lock; + struct kvm_io_bus __attribute__((btf_type_tag("rcu"))) * buses[4]; + struct { + spinlock_t lock; + struct list_head items; + struct list_head resampler_list; + struct mutex resampler_lock; + } irqfds; + struct list_head ioeventfds; + struct kvm_vm_stat stat; + struct kvm_arch arch; + refcount_t users_count; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + spinlock_t ring_lock; + struct list_head coalesced_zones; + struct mutex irq_lock; + struct kvm_irq_routing_table __attribute__((btf_type_tag("rcu"))) * irq_routing; + struct hlist_head irq_ack_notifier_list; + struct mmu_notifier mmu_notifier; + unsigned long mmu_invalidate_seq; + long mmu_invalidate_in_progress; + unsigned long mmu_invalidate_range_start; + unsigned long mmu_invalidate_range_end; + struct list_head devices; + u64 manual_dirty_log_protect; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; + pid_t userspace_pid; + bool override_halt_poll_ns; + unsigned int max_halt_poll_ns; + u32 dirty_ring_size; + bool dirty_ring_with_bitmap; + bool vm_bugged; + bool vm_dead; + struct notifier_block pm_notifier; + char stats_id[48]; +}; + +struct kvm_io_device; + +struct kvm_io_range { + gpa_t addr; + int len; + struct kvm_io_device *dev; +}; + +struct kvm_io_bus { + int dev_count; + int ioeventfd_count; + struct kvm_io_range range[0]; +}; + +struct kvm_apic_map { + struct callback_head rcu; + enum kvm_apic_logical_mode logical_mode; + u32 max_apic_id; + union { + struct kvm_lapic *xapic_flat_map[8]; + struct kvm_lapic *xapic_cluster_map[64]; + }; + struct kvm_lapic *phys_map[0]; +}; + +struct hv_partition_assist_pg { + u32 tlb_lock_count; +}; + +struct interval_tree_node { + struct rb_node rb; + unsigned long start; + unsigned long last; + unsigned long __subtree_last; +}; + +struct kvm_rmap_head; + +struct kvm_lpage_info; + +struct kvm_arch_memory_slot { + struct kvm_rmap_head *rmap[3]; + struct kvm_lpage_info *lpage_info[2]; + unsigned short *gfn_write_track; +}; + +struct kvm_memory_slot { + struct hlist_node id_node[2]; + struct interval_tree_node hva_node[2]; + struct rb_node gfn_node[2]; + gfn_t 
base_gfn; + unsigned long npages; + unsigned long *dirty_bitmap; + struct kvm_arch_memory_slot arch; + unsigned long userspace_addr; + u32 flags; + short id; + u16 as_id; +}; + +struct kvm_rmap_head { + unsigned long val; +}; + +struct kvm_lpage_info { + int disallow_lpage; +}; + +struct msr_bitmap_range { + u32 flags; + u32 nmsrs; + u32 base; + unsigned long *bitmap; +}; + +struct kvm_x86_msr_filter { + u8 count; + bool default_allow : 1; + struct msr_bitmap_range ranges[16]; +}; + +struct kvm_x86_pmu_event_filter { + __u32 action; + __u32 nevents; + __u32 fixed_counter_bitmap; + __u32 flags; + __u32 nr_includes; + __u32 nr_excludes; + __u64 *includes; + __u64 *excludes; + __u64 events[0]; +}; + +struct kvm_coalesced_mmio { + __u64 phys_addr; + __u32 len; + union { + __u32 pad; + __u32 pio; + }; + __u8 data[8]; +}; + +struct kvm_coalesced_mmio_ring { + __u32 first; + __u32 last; + struct kvm_coalesced_mmio coalesced_mmio[0]; +}; + +struct kvm_irq_routing_table { + int chip[72]; + u32 nr_rt_entries; + struct hlist_head map[0]; +}; + +struct mmu_notifier_range; + +struct mmu_notifier_ops { + void (*release)(struct mmu_notifier *, struct mm_struct *); + int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, + unsigned long); + int (*clear_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, + unsigned long); + int (*test_young)(struct mmu_notifier *, struct mm_struct *, unsigned long); + void (*change_pte)(struct mmu_notifier *, struct mm_struct *, unsigned long, pte_t); + int (*invalidate_range_start)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*invalidate_range_end)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*arch_invalidate_secondary_tlbs)(struct mmu_notifier *, struct mm_struct *, + unsigned long, unsigned long); + struct mmu_notifier *(*alloc_notifier)(struct mm_struct *); + void (*free_notifier)(struct mmu_notifier *); +}; + +struct mmu_notifier_range { + struct mm_struct *mm; + unsigned long start; + unsigned long end; + unsigned int flags; + enum mmu_notifier_event event; + void *owner; +}; + +struct _kvm_stats_desc; + +struct kvm_stat_data { + struct kvm *kvm; + const struct _kvm_stats_desc *desc; + enum kvm_stat_kind kind; +}; + +struct kvm_stats_desc { + __u32 flags; + __s16 exponent; + __u16 size; + __u32 offset; + __u32 bucket_size; + char name[0]; +}; + +struct _kvm_stats_desc { + struct kvm_stats_desc desc; + char name[48]; +}; + +struct kvm_debug_exit_arch { + __u32 exception; + __u32 pad; + __u64 pc; + __u64 dr6; + __u64 dr7; +}; + +struct kvm_hyperv_exit { + __u32 type; + __u32 pad1; + union { + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; + } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 status; + __u64 send_page; + __u64 recv_page; + __u64 pending_page; + } syndbg; + } u; +}; + +struct kvm_xen_exit { + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; +}; + +struct kvm_regs { + __u64 rax; + __u64 rbx; + __u64 rcx; + __u64 rdx; + __u64 rsi; + __u64 rdi; + __u64 rsp; + __u64 rbp; + __u64 r8; + __u64 r9; + __u64 r10; + __u64 r11; + __u64 r12; + __u64 r13; + __u64 r14; + __u64 r15; + __u64 rip; + __u64 rflags; +}; + +struct kvm_segment { + __u64 base; + __u32 limit; + __u16 selector; + __u8 type; + __u8 present; + __u8 dpl; + __u8 db; + __u8 s; + __u8 l; + __u8 g; + __u8 
avl; + __u8 unusable; + __u8 padding; +}; + +struct kvm_dtable { + __u64 base; + __u16 limit; + __u16 padding[3]; +}; + +struct kvm_sregs { + struct kvm_segment cs; + struct kvm_segment ds; + struct kvm_segment es; + struct kvm_segment fs; + struct kvm_segment gs; + struct kvm_segment ss; + struct kvm_segment tr; + struct kvm_segment ldt; + struct kvm_dtable gdt; + struct kvm_dtable idt; + __u64 cr0; + __u64 cr2; + __u64 cr3; + __u64 cr4; + __u64 cr8; + __u64 efer; + __u64 apic_base; + __u64 interrupt_bitmap[4]; +}; + +struct kvm_vcpu_events { + struct { + __u8 injected; + __u8 nr; + __u8 has_error_code; + __u8 pending; + __u32 error_code; + } exception; + struct { + __u8 injected; + __u8 nr; + __u8 soft; + __u8 shadow; + } interrupt; + struct { + __u8 injected; + __u8 pending; + __u8 masked; + __u8 pad; + } nmi; + __u32 sipi_vector; + __u32 flags; + struct { + __u8 smm; + __u8 pending; + __u8 smm_inside_nmi; + __u8 latched_init; + } smi; + struct { + __u8 pending; + } triple_fault; + __u8 reserved[26]; + __u8 exception_has_payload; + __u64 exception_payload; +}; + +struct kvm_sync_regs { + struct kvm_regs regs; + struct kvm_sregs sregs; + struct kvm_vcpu_events events; +}; + +struct kvm_run { + __u8 request_interrupt_window; + __u8 immediate_exit; + __u8 padding1[6]; + __u32 exit_reason; + __u8 ready_for_interrupt_injection; + __u8 if_flag; + __u16 flags; + __u64 cr8; + __u64 apic_base; + union { + struct { + __u64 hardware_exit_reason; + } hw; + struct { + __u64 hardware_entry_failure_reason; + __u32 cpu; + } fail_entry; + struct { + __u32 exception; + __u32 error_code; + } ex; + struct { + __u8 direction; + __u8 size; + __u16 port; + __u32 count; + __u64 data_offset; + } io; + struct { + struct kvm_debug_exit_arch arch; + } debug; + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } mmio; + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; + struct { + __u64 nr; + __u64 args[6]; + __u64 ret; + union { + __u64 flags; + }; + } hypercall; + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; + struct { + __u8 icptcode; + __u16 ipa; + __u32 ipb; + } s390_sieic; + __u64 s390_reset_flags; + struct { + __u64 trans_exc_code; + __u32 pgm_code; + } s390_ucontrol; + struct { + __u32 dcrn; + __u32 data; + __u8 is_write; + } dcr; + struct { + __u32 suberror; + __u32 ndata; + __u64 data[16]; + } internal; + struct { + __u32 suberror; + __u32 ndata; + __u64 flags; + union { + struct { + __u8 insn_size; + __u8 insn_bytes[15]; + }; + }; + } emulation_failure; + struct { + __u64 gprs[32]; + } osi; + struct { + __u64 nr; + __u64 ret; + __u64 args[9]; + } papr_hcall; + struct { + __u16 subchannel_id; + __u16 subchannel_nr; + __u32 io_int_parm; + __u32 io_int_word; + __u32 ipb; + __u8 dequeued; + } s390_tsch; + struct { + __u32 epr; + } epr; + struct { + __u32 type; + __u32 ndata; + union { + __u64 data[16]; + }; + } system_event; + struct { + __u64 addr; + __u8 ar; + __u8 reserved; + __u8 fc; + __u8 sel1; + __u16 sel2; + } s390_stsi; + struct { + __u8 vector; + } eoi; + struct kvm_hyperv_exit hyperv; + struct { + __u64 esr_iss; + __u64 fault_ipa; + } arm_nisv; + struct { + __u8 error; + __u8 pad[7]; + __u32 reason; + __u32 index; + __u64 data; + } msr; + struct kvm_xen_exit xen; + struct { + unsigned long extension_id; + unsigned long function_id; + unsigned long args[6]; + unsigned long ret[2]; + } riscv_sbi; + struct { + unsigned long csr_num; + unsigned long new_value; + unsigned long write_mask; + unsigned long ret_value; 
+ } riscv_csr; + struct { + __u32 flags; + } notify; + char padding[256]; + }; + __u64 kvm_valid_regs; + __u64 kvm_dirty_regs; + union { + struct kvm_sync_regs regs; + char padding[2048]; + } s; +}; + +struct kvm_cpuid_entry2 { + __u32 function; + __u32 index; + __u32 flags; + __u32 eax; + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 padding[3]; +}; + +struct kvm_vcpu_hv_synic { + u64 version; + u64 control; + u64 msg_page; + u64 evt_page; + atomic64_t sint[16]; + atomic_t sint_to_gsi[16]; + unsigned long auto_eoi_bitmap[4]; + unsigned long vec_bitmap[4]; + bool active; + bool dont_zero_synic_pages; +}; + +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable : 1; + u64 periodic : 1; + u64 lazy : 1; + u64 auto_enable : 1; + u64 apic_vector : 8; + u64 direct_mode : 1; + u64 reserved_z0 : 3; + u64 sintx : 4; + u64 reserved_z1 : 44; + }; +}; + +union hv_message_flags { + __u8 asu8; + struct { + __u8 msg_pending : 1; + __u8 reserved : 7; + }; +}; + +union hv_port_id { + __u32 asu32; + struct { + __u32 id : 24; + __u32 reserved : 8; + } u; +}; + +struct hv_message_header { + __u32 message_type; + __u8 payload_size; + union hv_message_flags message_flags; + __u8 reserved[2]; + union { + __u64 sender; + union hv_port_id port; + }; +}; + +struct hv_message { + struct hv_message_header header; + union { + __u64 payload[30]; + } u; +}; + +struct kvm_vcpu_hv_stimer { + struct hrtimer timer; + int index; + union hv_stimer_config config; + u64 count; + u64 exp_time; + struct hv_message msg; + bool msg_pending; +}; + +struct kvm_vcpu_hv_tlb_flush_fifo { + spinlock_t write_lock; + struct { + union { + struct __kfifo kfifo; + u64 *type; + const u64 *const_type; + char (*rectype)[0]; + u64 *ptr; + const u64 *ptr_const; + }; + u64 buf[16]; + } entries; +}; + +struct hv_nested_enlightenments_control { + struct { + __u32 directhypercall : 1; + __u32 reserved : 31; + } features; + struct { + __u32 inter_partition_comm : 1; + __u32 reserved : 31; + } hypercallControls; +}; + +struct hv_vp_assist_page { + __u32 apic_assist; + __u32 reserved1; + __u32 vtl_entry_reason; + __u32 vtl_reserved; + __u64 vtl_ret_x64rax; + __u64 vtl_ret_x64rcx; + struct hv_nested_enlightenments_control nested_control; + __u8 enlighten_vmentry; + __u8 reserved2[7]; + __u64 current_nested_vmcs; + __u8 synthetic_time_unhalted_timer_expired; + __u8 reserved3[7]; + __u8 virtualization_fault_information[40]; + __u8 reserved4[8]; + __u8 intercept_message[256]; + __u8 vtl_ret_actions[256]; +}; + +struct kvm_vcpu_hv { + struct kvm_vcpu *vcpu; + u32 vp_index; + u64 hv_vapic; + s64 runtime_offset; + struct kvm_vcpu_hv_synic synic; + struct kvm_hyperv_exit exit; + struct kvm_vcpu_hv_stimer stimer[4]; + unsigned long stimer_pending_bitmap[1]; + bool enforce_cpuid; + struct { + u32 features_eax; + u32 features_ebx; + u32 features_edx; + u32 enlightenments_eax; + u32 enlightenments_ebx; + u32 syndbg_cap_eax; + u32 nested_eax; + u32 nested_ebx; + } cpuid_cache; + struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[2]; + u64 sparse_banks[64]; + struct hv_vp_assist_page vp_assist_page; + struct { + u64 pa_page_gpa; + u64 vm_id; + u32 vp_id; + } nested; +}; + +struct kvm_dirty_gfn { + __u32 flags; + __u32 slot; + __u64 offset; +}; + +typedef void (*btf_trace_module_load)(void *, struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, unsigned long); + +typedef void (*btf_trace_module_put)(void *, struct module *, unsigned long); + +typedef void 
(*btf_trace_module_request)(void *, char *, bool, unsigned long); + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +struct mod_tree_root { + struct latch_tree_root root; + unsigned long addr_min; + unsigned long addr_max; +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, +}; + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, + LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +enum fail_dup_mod_reason { + FAIL_DUP_MOD_BECOMING = 0, + FAIL_DUP_MOD_LOAD = 1, +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + unsigned long ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + unsigned long ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +struct mod_initfree { + struct llist_node node; + void *init_text; + void *init_data; + void *init_rodata; +}; + +struct idempotent { + const void *cookie; + struct hlist_node entry; + struct completion complete; + int ret; +}; + +struct trace_event_data_offsets_module_load { + u32 name; +}; + +struct trace_event_data_offsets_module_free { + u32 name; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; +}; + +struct trace_event_data_offsets_module_request { + u32 name; +}; + +struct find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +struct load_info { + const char *name; + struct module *mod; + Elf64_Ehdr *hdr; + unsigned long len; + Elf64_Shdr *sechdrs; + char *secstrings; + char *strtab; + unsigned long symoffs; + unsigned long stroffs; + unsigned long init_typeoffs; + unsigned long core_typeoffs; + bool sig_ok; + unsigned long mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +struct module_sect_attr { + struct bin_attribute battr; + unsigned long address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +enum kcmp_type { + KCMP_FILE = 0, + KCMP_VM = 1, + KCMP_FILES = 2, + KCMP_FS = 3, + KCMP_SIGHAND = 4, + KCMP_IO = 5, + KCMP_SYSVSEM = 6, + KCMP_EPOLL_TFD = 7, + KCMP_TYPES = 8, +}; + +struct kcmp_epoll_slot 
{ + __u32 efd; + __u32 tfd; + __u32 toff; +}; + +struct profile_hit { + u32 pc; + u32 hits; +}; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +typedef u32 nlink_t; + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; +}; + +struct stacktrace_cookie { + unsigned long *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_suseconds_t suseconds_t; + +typedef __u64 timeu64_t; + +typedef __kernel_long_t __kernel_old_time_t; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long long offset; + long long freq; + long long maxerror; + long long esterror; + int status; + long long constant; + long long precision; + long long tolerance; + struct __kernel_timex_timeval time; + long long tick; + long long ppsfreq; + long long jitter; + int shift; + long long stabil; + long long jitcnt; + long long calcnt; + long long errcnt; + long long stbcnt; + int tai; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct old_timex32 { + u32 modes; + s32 offset; + s32 freq; + s32 maxerror; + s32 esterror; + s32 status; + s32 constant; + s32 precision; + s32 tolerance; + struct old_timeval32 time; + s32 tick; + s32 ppsfreq; + s32 jitter; + s32 shift; + s32 stabil; + s32 jitcnt; + s32 calcnt; + s32 errcnt; + s32 stbcnt; + s32 tai; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct tk_read_base { + struct clocksource *clock; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + struct tk_read_base tkr_raw; + u64 xtime_sec; + unsigned long ktime_sec; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + ktime_t next_leap_ktime; + u64 raw_sec; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; +}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, unsigned long, unsigned int); + +typedef void 
(*btf_trace_timer_expire_entry)(void *, struct timer_list *, unsigned long); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 *const, + unsigned long long); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, unsigned long long); + +typedef void (*btf_trace_tick_stop)(void *, int, int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + unsigned long clk; + unsigned long next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + bool timers_pending; + unsigned long pending_map[9]; + struct hlist_head vectors[576]; + long : 64; + long : 64; +}; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + unsigned long expires; + unsigned long now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + unsigned long now; + void *function; + unsigned long baseclk; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + s64 now; + void *function; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + unsigned long long expires; + long value_sec; + long value_nsec; + long interval_sec; + long interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + unsigned long long now; + char __data[0]; +}; + +struct trace_event_raw_tick_stop { + struct trace_entry ent; + int success; + int dependency; + char __data[0]; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +struct trace_event_data_offsets_tick_stop {}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + 
HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +struct tk_fast { + seqcount_latch_t seq; + struct tk_read_base base[2]; +}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; +}; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +struct audit_ntp_val { + long long oldval; + long long newval; +}; + +struct audit_ntp_data { + struct audit_ntp_val vals[6]; +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +enum audit_ntp_type { + AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; +}; + +struct rtc_class_ops; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + unsigned long flags; + unsigned long irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + unsigned long set_offset_nsec; + unsigned long features[1]; + time64_t range_min; + timeu64_t range_max; + timeu64_t alarm_offset_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; +}; + +struct rtc_wkalrm; + +struct rtc_param; + +struct rtc_class_ops { + int (*ioctl)(struct device *, unsigned int, unsigned long); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long *); + int (*set_offset)(struct device *, long); + int (*param_get)(struct device *, struct rtc_param *); + int (*param_set)(struct device *, struct rtc_param *); +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +struct rtc_param { + __u64 param; + union { + __u64 uvalue; + __s64 svalue; + __u64 ptr; + }; + __u32 index; + __u32 __pad; +}; + +typedef s64 int64_t; + +enum wd_read_status { + WD_READ_SUCCESS = 0, + WD_READ_UNSTABLE = 1, + WD_READ_SKIP = 2, +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +enum tick_nohz_mode { + NOHZ_MODE_INACTIVE = 0, + NOHZ_MODE_LOWRES = 1, + NOHZ_MODE_HIGHRES = 2, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +struct tick_sched { + unsigned int inidle : 1; + unsigned int tick_stopped : 1; + unsigned int idle_active : 1; + unsigned int do_timer_last : 1; + unsigned int got_idle_tick : 1; + unsigned int stalled_jiffies; + unsigned long last_tick_jiffies; + struct hrtimer sched_timer; + ktime_t last_tick; + ktime_t next_tick; + unsigned long idle_jiffies; + 
ktime_t idle_waketime; + seqcount_t idle_sleeptime_seq; + ktime_t idle_entrytime; + enum tick_nohz_mode nohz_mode; + unsigned long last_jiffies; + u64 timer_expires_base; + u64 timer_expires; + u64 next_timer; + ktime_t idle_expires; + unsigned long idle_calls; + unsigned long idle_sleeps; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + atomic_t tick_dep_mask; + unsigned long check_clocks; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long tm_year; + int tm_wday; + int tm_yday; +}; + +struct cyclecounter; + +struct timecounter { + const struct cyclecounter *cc; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + u64 mask; + u32 mult; + u32 shift; +}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +struct alarm; + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +enum alarmtimer_restart { + ALARMTIMER_NORESTART = 0, + ALARMTIMER_RESTART = 1, +}; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + enum alarmtimer_restart (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct k_itimer; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); + void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + int firing; + struct task_struct __attribute__((btf_type_tag("rcu"))) * handling; +}; + +typedef __kernel_timer_t timer_t; + +struct k_itimer { + struct list_head list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_active; + s64 it_overrun; + s64 it_overrun_last; + int it_requeue_pending; + int it_sigev_notify; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue *sigq; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t 
(*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +struct class_interface { + struct list_head node; + const struct class *class; + int (*add_dev)(struct device *); + void (*remove_dev)(struct device *); +}; + +struct platform_driver { + int (*probe)(struct platform_device *); + int (*remove)(struct platform_device *); + void (*remove_new)(struct platform_device *); + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; + bool driver_managed_dma; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct trace_event_data_offsets_alarm_class {}; + +struct sigevent { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[12]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[13]; + compat_int_t _tid; + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +}; + +typedef struct sigevent sigevent_t; + +struct posix_clock; + +struct posix_clock_context; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int (*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long (*ioctl)(struct posix_clock_context *, unsigned int, unsigned long); + int (*open)(struct posix_clock_context *, fmode_t); + __poll_t (*poll)(struct posix_clock_context *, struct file *, poll_table *); + int (*release)(struct posix_clock_context *); + ssize_t (*read)(struct posix_clock_context *, uint, + char __attribute__((btf_type_tag("user"))) *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +struct old_itimerval32 { + struct old_timeval32 it_interval; + struct old_timeval32 it_value; +}; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +struct proc_timens_offset { + int clockid; + struct timespec64 val; +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +union futex_key { + struct { + u64 i_seq; + unsigned long pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + unsigned long address; + unsigned int offset; + } private; + struct { + 
u64 ptr; + unsigned long word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex_base pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +struct futex_q; + +typedef void futex_wake_fn(struct wake_q_head *, struct futex_q *); + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + futex_wake_fn *wake; + void *wake_data; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; + atomic_t requeue_state; +}; + +struct futex_waitv { + __u64 val; + __u64 uaddr; + __u32 flags; + __u32 __reserved; +}; + +struct futex_vector { + struct futex_waitv w; + struct futex_q q; +}; + +enum { + Q_REQUEUE_PI_NONE = 0, + Q_REQUEUE_PI_IGNORE = 1, + Q_REQUEUE_PI_IN_PROGRESS = 2, + Q_REQUEUE_PI_WAIT = 3, + Q_REQUEUE_PI_DONE = 4, + Q_REQUEUE_PI_LOCKED = 5, +}; + +struct dma_chan { + int lock; + const char *device_id; +}; + +typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, unsigned long, + smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_exit)(void *, smp_call_func_t, call_single_data_t *); + +struct call_function_data { + call_single_data_t __attribute__((btf_type_tag("percpu"))) * csd; + cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct trace_event_raw_csd_queue_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_raw_csd_function { + struct trace_entry ent; + void *func; + void *csd; + char __data[0]; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +struct trace_event_data_offsets_csd_queue_cpu {}; + +struct trace_event_data_offsets_csd_function {}; + +typedef unsigned short __kernel_old_uid_t; + +typedef __kernel_old_uid_t old_uid_t; + +typedef unsigned short __kernel_old_gid_t; + +typedef __kernel_old_gid_t old_gid_t; + +union bpf_iter_link_info; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, + struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, + struct bpf_link_info *); + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 
27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + 
BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + BPF_FUNC_sys_bpf = 166, + BPF_FUNC_btf_find_by_name_kind = 167, + BPF_FUNC_sys_close = 168, + BPF_FUNC_timer_init = 169, + BPF_FUNC_timer_set_callback = 170, + BPF_FUNC_timer_start = 171, + BPF_FUNC_timer_cancel = 172, + BPF_FUNC_get_func_ip = 173, + BPF_FUNC_get_attach_cookie = 174, + BPF_FUNC_task_pt_regs = 175, + BPF_FUNC_get_branch_snapshot = 176, + BPF_FUNC_trace_vprintk = 177, + BPF_FUNC_skc_to_unix_sock = 178, + BPF_FUNC_kallsyms_lookup_name = 179, + BPF_FUNC_find_vma = 180, + BPF_FUNC_loop = 181, + BPF_FUNC_strncmp = 182, + BPF_FUNC_get_func_arg = 183, + BPF_FUNC_get_func_ret = 184, + BPF_FUNC_get_func_arg_cnt = 185, + BPF_FUNC_get_retval = 186, + BPF_FUNC_set_retval = 187, + BPF_FUNC_xdp_get_buff_len = 188, + BPF_FUNC_xdp_load_bytes = 189, + BPF_FUNC_xdp_store_bytes = 190, + BPF_FUNC_copy_from_user_task = 191, + BPF_FUNC_skb_set_tstamp = 192, + BPF_FUNC_ima_file_hash = 193, + BPF_FUNC_kptr_xchg = 194, + BPF_FUNC_map_lookup_percpu_elem = 195, + BPF_FUNC_skc_to_mptcp_sock = 196, + BPF_FUNC_dynptr_from_mem = 197, + BPF_FUNC_ringbuf_reserve_dynptr = 198, + BPF_FUNC_ringbuf_submit_dynptr = 199, + BPF_FUNC_ringbuf_discard_dynptr = 200, + BPF_FUNC_dynptr_read = 201, + BPF_FUNC_dynptr_write = 202, + BPF_FUNC_dynptr_data = 203, + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, + BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, + BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, + BPF_FUNC_ktime_get_tai_ns = 208, + BPF_FUNC_user_ringbuf_drain = 209, + BPF_FUNC_cgrp_storage_get = 210, + BPF_FUNC_cgrp_storage_delete = 211, + __BPF_FUNC_MAX_ID = 212, +}; + +struct bpf_func_proto; + +typedef const struct bpf_func_proto *(*bpf_iter_get_func_proto_t)(enum bpf_func_id, + const struct bpf_prog *); + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; + u32 ctx_arg_info_size; + u32 feature; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; + struct { + enum bpf_cgroup_iter_order order; + __u32 cgroup_fd; + __u64 cgroup_id; + } cgroup; + struct { + __u32 tid; + __u32 pid; + __u32 pid_fd; + } task; +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_SOCKET = 3, + RET_PTR_TO_TCP_SOCK = 4, 
+ RET_PTR_TO_SOCK_COMMON = 5, + RET_PTR_TO_MEM = 6, + RET_PTR_TO_MEM_OR_BTF_ID = 7, + RET_PTR_TO_BTF_ID = 8, + __BPF_RET_TYPE_MAX = 9, + RET_PTR_TO_MAP_VALUE_OR_NULL = 258, + RET_PTR_TO_SOCKET_OR_NULL = 259, + RET_PTR_TO_TCP_SOCK_OR_NULL = 260, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, + RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, + RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, + RET_PTR_TO_BTF_ID_OR_NULL = 264, + RET_PTR_TO_BTF_ID_TRUSTED = 1048584, + __BPF_RET_TYPE_LIMIT = 33554431, +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_MEM = 4, + ARG_CONST_SIZE = 5, + ARG_CONST_SIZE_OR_ZERO = 6, + ARG_PTR_TO_CTX = 7, + ARG_ANYTHING = 8, + ARG_PTR_TO_SPIN_LOCK = 9, + ARG_PTR_TO_SOCK_COMMON = 10, + ARG_PTR_TO_INT = 11, + ARG_PTR_TO_LONG = 12, + ARG_PTR_TO_SOCKET = 13, + ARG_PTR_TO_BTF_ID = 14, + ARG_PTR_TO_RINGBUF_MEM = 15, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 16, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 17, + ARG_PTR_TO_PERCPU_BTF_ID = 18, + ARG_PTR_TO_FUNC = 19, + ARG_PTR_TO_STACK = 20, + ARG_PTR_TO_CONST_STR = 21, + ARG_PTR_TO_TIMER = 22, + ARG_PTR_TO_KPTR = 23, + ARG_PTR_TO_DYNPTR = 24, + __BPF_ARG_TYPE_MAX = 25, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, + ARG_PTR_TO_MEM_OR_NULL = 260, + ARG_PTR_TO_CTX_OR_NULL = 263, + ARG_PTR_TO_SOCKET_OR_NULL = 269, + ARG_PTR_TO_STACK_OR_NULL = 276, + ARG_PTR_TO_BTF_ID_OR_NULL = 270, + ARG_PTR_TO_UNINIT_MEM = 32772, + ARG_PTR_TO_FIXED_SIZE_MEM = 262148, + __BPF_ARG_TYPE_LIMIT = 33554431, +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + bool might_sleep; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + struct { + size_t arg1_size; + size_t arg2_size; + size_t arg3_size; + size_t arg4_size; + size_t arg5_size; + }; + size_t arg_size[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + unsigned long value; + unsigned int nameoff; + char type; + char name[512]; + char module_name[56]; + int exported; + int show_value; +}; + +struct bpf_iter_meta; + +struct bpf_iter__ksym { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kallsym_iter *ksym; + }; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +struct bsd_acct_struct { + struct fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + unsigned long needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +typedef __u16 comp_t; + +struct acct_v3 { + char ac_flag; + char ac_version; + __u16 ac_tty; + __u32 ac_exitcode; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u32 ac_etime; + comp_t ac_utime; + comp_t ac_stime; + comp_t 
ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + char ac_comm[16]; +}; + +typedef struct acct_v3 acct_t; + +typedef u32 note_buf_t[92]; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +typedef struct elf64_phdr Elf64_Phdr; + +struct elf64_note { + Elf64_Word n_namesz; + Elf64_Word n_descsz; + Elf64_Word n_type; +}; + +struct kexec_load_limit { + struct mutex mutex; + int limit; +}; + +typedef unsigned long elf_greg_t; + +typedef elf_greg_t elf_gregset_t[27]; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus_common { + struct elf_siginfo pr_info; + short pr_cursig; + unsigned long pr_sigpend; + unsigned long pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; +}; + +struct elf_prstatus { + struct elf_prstatus_common common; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +struct compat_kexec_segment { + compat_uptr_t buf; + compat_size_t bufsz; + compat_ulong_t mem; + compat_size_t memsz; +}; + +struct crypto_tfm; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, + unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, + unsigned int *); +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; +}; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; +}; + +struct shash_desc; + +struct crypto_shash; + +struct shash_alg { + int (*init)(struct shash_desc *); + int (*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); + unsigned int descsize; + union { + struct { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; + }; + struct hash_alg_common halg; + }; +}; + +struct shash_desc { + struct crypto_shash *tfm; + void *__ctx[0]; +}; + +struct crypto_tfm { + refcount_t refcnt; + u32 crt_flags; + int node; + 
void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + void *__crt_ctx[0]; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct kexec_sha_region { + unsigned long start; + unsigned long len; +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + struct work_struct free_work; + void *__ctx[0]; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct rtattr **); + char name[128]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void __attribute__((btf_type_tag("percpu"))) * percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, + struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, + struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + unsigned long magic; + bool new_sb_created; +}; + +struct cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + 
struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_FAVOR_DYNMODS = 16, + CGRP_ROOT_CPUSET_V2_MODE = 65536, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = 524288, +}; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, + CGRP_KILL = 4, +}; + +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, + __CFTYPE_ADDED = 262144, +}; + +enum { + CSS_TASK_ITER_PROCS = 1, + CSS_TASK_ITER_THREADED = 2, + CSS_TASK_ITER_SKIPPED = 65536, +}; + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_favordynmods = 1, + Opt_memory_localevents = 2, + Opt_memory_recursiveprot = 3, + Opt_memory_hugetlb_accounting = 4, + nr__cgroup2_params = 5, +}; + +enum cgroup_opt_features { + OPT_FEATURE_COUNT = 0, +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + char __data[0]; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_level; + u64 dst_id; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +struct cgroup_pidlist; + +struct cgroup_file_ctx { + struct cgroup_namespace *ns; + struct { + void *trigger; + } psi; + struct { + bool started; + struct css_task_iter iter; + } procs; + struct 
{ + struct cgroup_pidlist *pidlist; + } procs1; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + u32 comm; +}; + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; + struct rw_semaphore kernfs_rwsem; + struct rw_semaphore kernfs_iattr_rwsem; + struct rw_semaphore kernfs_supers_rwsem; +}; + +struct simple_xattrs { + struct rb_root rb_root; + rwlock_t lock; +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u64 ino; + u32 gen; + } __attribute__((packed)) i64; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + struct { + struct { + } __empty_raw; + __u32 raw[0]; + }; + }; +}; + +typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); + +struct btf_id_set8; + +struct btf_kfunc_id_set { + struct module *owner; + struct btf_id_set8 *set; + btf_kfunc_filter_t filter; +}; + +struct btf_id_set8 { + u32 cnt; + u32 flags; + struct { + u32 id; + u32 flags; + } pairs[0]; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +enum cgroup1_param { + Opt_all = 0, + Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, + Opt_favordynmods___2 = 8, + Opt_nofavordynmods = 9, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + __u64 nr_io_wait; +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + int64_t watermark; + struct cgroup_file events_file; + atomic64_t events_limit; +}; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; +}; + +enum prs_errcode { + PERR_NONE = 0, + PERR_INVCPUS = 1, + PERR_INVPARENT = 2, + PERR_NOTPART = 3, + PERR_NOTEXCL = 4, + PERR_NOCPUS = 5, + PERR_HOTPLUG = 6, + PERR_CPUSEMPTY = 7, + PERR_HKEEPING = 8, +}; + +struct cpuset { + struct cgroup_subsys_state css; + unsigned long flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t effective_xcpus; + cpumask_var_t exclusive_cpus; + nodemask_t old_mems_allowed; + struct fmeter fmeter; + int attach_in_progress; + int pn; + int relax_domain_level; + int nr_subparts; + int partition_root_state; + int use_parent_ecpus; + int child_ecpus_count; + int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; + enum prs_errcode prs_err; + struct cgroup_file partition_file; + struct list_head remote_sibling; +}; + 
+enum partition_cmd { + partcmd_enable = 0, + partcmd_disable = 1, + partcmd_update = 2, + partcmd_invalidate = 3, +}; + +enum { + ZONELIST_FALLBACK = 0, + ZONELIST_NOFALLBACK = 1, + MAX_ZONELISTS = 2, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_EXCLUSIVE_CPULIST = 6, + FILE_EFFECTIVE_XCPULIST = 7, + FILE_CPU_EXCLUSIVE = 8, + FILE_MEM_EXCLUSIVE = 9, + FILE_MEM_HARDWALL = 10, + FILE_SCHED_LOAD_BALANCE = 11, + FILE_PARTITION_ROOT = 12, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 13, + FILE_MEMORY_PRESSURE_ENABLED = 14, + FILE_MEMORY_PRESSURE = 15, + FILE_SPREAD_PAGE = 16, + FILE_SPREAD_SLAB = 17, +} cpuset_filetype_t; + +struct idmap_key { + bool map_up; + u32 id; + u32 count; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; + unsigned long caller; + cpu_stop_fn_t fn; +}; + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +struct auditd_connection { + struct pid *pid; + u32 portid; + struct net *net; + struct callback_head rcu; +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + __kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct audit_cap_data { + kernel_cap_t permitted; + kernel_cap_t inheritable; + union { + unsigned int fE; + kernel_cap_t effective; + }; + kernel_cap_t ambient; + kuid_t rootid; +}; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +enum audit_state { + AUDIT_STATE_DISABLED = 0, + AUDIT_STATE_BUILD = 1, + AUDIT_STATE_RECORD = 2, +}; + +struct audit_names { + struct list_head list; + struct filename *name; + int name_len; + bool hidden; + unsigned long ino; + dev_t dev; + umode_t mode; + kuid_t uid; + kgid_t gid; + dev_t rdev; + u32 osid; + struct audit_cap_data fcap; + unsigned int fcap_ver; + unsigned char type; + bool should_free; +}; + +struct audit_proctitle { + int len; + char *value; +}; + +struct audit_aux_data; + +struct __kernel_sockaddr_storage; + +struct audit_tree_refs; + +struct audit_context { + int dummy; + enum { + AUDIT_CTX_UNUSED = 0, + AUDIT_CTX_SYSCALL = 1, + AUDIT_CTX_URING = 2, + } context; + enum audit_state state; + enum audit_state current_state; + unsigned int serial; + int major; + int uring_op; + struct timespec64 ctime; + unsigned long argv[4]; + long return_code; + u64 prio; + int return_valid; + struct audit_names preallocated_names[5]; + int name_count; + struct list_head names_list; + char *filterkey; + struct path 
pwd; + struct audit_aux_data *aux; + struct audit_aux_data *aux_pids; + struct __kernel_sockaddr_storage *sockaddr; + size_t sockaddr_len; + pid_t ppid; + kuid_t uid; + kuid_t euid; + kuid_t suid; + kuid_t fsuid; + kgid_t gid; + kgid_t egid; + kgid_t sgid; + kgid_t fsgid; + unsigned long personality; + int arch; + pid_t target_pid; + kuid_t target_auid; + kuid_t target_uid; + unsigned int target_sessionid; + u32 target_sid; + char target_comm[16]; + struct audit_tree_refs *trees; + struct audit_tree_refs *first_trees; + struct list_head killed_trees; + int tree_count; + int type; + union { + struct { + int nargs; + long args[6]; + } socketcall; + struct { + kuid_t uid; + kgid_t gid; + umode_t mode; + u32 osid; + int has_perm; + uid_t perm_uid; + gid_t perm_gid; + umode_t perm_mode; + unsigned long qbytes; + } ipc; + struct { + mqd_t mqdes; + struct mq_attr mqstat; + } mq_getsetattr; + struct { + mqd_t mqdes; + int sigev_signo; + } mq_notify; + struct { + mqd_t mqdes; + size_t msg_len; + unsigned int msg_prio; + struct timespec64 abs_timeout; + } mq_sendrecv; + struct { + int oflag; + umode_t mode; + struct mq_attr attr; + } mq_open; + struct { + pid_t pid; + struct audit_cap_data cap; + } capset; + struct { + int fd; + int flags; + } mmap; + struct open_how openat2; + struct { + int argc; + } execve; + struct { + char *name; + } module; + struct { + struct audit_ntp_data ntp_data; + struct timespec64 tk_injoffset; + } time; + }; + int fds[2]; + struct audit_proctitle proctitle; +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +struct audit_ctl_mutex { + struct mutex lock; + void *owner; +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + unsigned int *id; + size_t size; +}; + +struct audit_features { + __u32 vers; + __u32 mask; + __u32 features; + __u32 lock; +}; + +enum audit_nlgrps { + AUDIT_NLGRP_NONE = 0, + AUDIT_NLGRP_READLOG = 1, + __AUDIT_NLGRP_MAX = 2, +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +struct audit_reply { + __u32 portid; + struct net *net; + struct sk_buff *skb; +}; + +struct audit_net { + struct sock *sk; +}; + +struct audit_buffer { + struct sk_buff *skb; + struct audit_context *ctx; + gfp_t gfp_mask; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + struct mutex *cb_mutex; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, unsigned long *); +}; + +struct audit_sig_info { + uid_t uid; + pid_t pid; + char ctx[0]; +}; + +struct audit_tty_status { + __u32 enabled; + __u32 log_passwd; +}; + +struct audit_status { + __u32 mask; + __u32 enabled; + __u32 failure; + __u32 pid; + __u32 rate_limit; + __u32 backlog_limit; + __u32 lost; + __u32 backlog; + union { + __u32 version; + __u32 feature_bitmap; + }; + __u32 backlog_wait_time; + __u32 backlog_wait_time_actual; +}; + +struct audit_netlink_list { + __u32 portid; + struct net *net; + struct sk_buff_head q; +}; + +struct rhash_lock_head {}; + +enum { + Audit_equal = 0, + Audit_not_equal = 1, + Audit_bitmask = 2, + Audit_bittest = 3, + Audit_lt = 4, + Audit_gt = 5, + Audit_le = 6, + 
Audit_ge = 7, + Audit_bad = 8, +}; + +struct audit_field; + +struct audit_watch; + +struct audit_tree; + +struct audit_fsnotify_mark; + +struct audit_krule { + u32 pflags; + u32 flags; + u32 listnr; + u32 action; + u32 mask[64]; + u32 buflen; + u32 field_count; + char *filterkey; + struct audit_field *fields; + struct audit_field *arch_f; + struct audit_field *inode_f; + struct audit_watch *watch; + struct audit_tree *tree; + struct audit_fsnotify_mark *exe; + struct list_head rlist; + struct list_head list; + u64 prio; +}; + +struct audit_entry { + struct list_head list; + struct callback_head rcu; + struct audit_krule rule; +}; + +struct audit_field { + u32 type; + union { + u32 val; + kuid_t uid; + kgid_t gid; + struct { + char *lsm_str; + void *lsm_rule; + }; + }; + u32 op; +}; + +struct audit_rule_data { + __u32 flags; + __u32 action; + __u32 field_count; + __u32 mask[64]; + __u32 fields[64]; + __u32 values[64]; + __u32 fieldflags[64]; + __u32 buflen; + char buf[0]; +}; + +struct fsnotify_group; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignore_mask; + unsigned int flags; +}; + +struct audit_fsnotify_mark { + dev_t dev; + unsigned long ino; + char *path; + struct fsnotify_mark mark; + struct audit_krule *rule; +}; + +struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fanotify_group_private_data { + struct hlist_head *merge_hash; + struct list_head access_list; + wait_queue_head_t access_waitq; + int flags; + int f_flags; + struct ucounts *ucounts; + mempool_t error_events_pool; +}; + +struct fsnotify_ops; + +struct fsnotify_event; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + unsigned int priority; + bool shutdown; + int flags; + unsigned int owner_flags; + struct mutex mark_mutex; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + struct fanotify_group_private_data fanotify_data; + }; +}; + +struct fsnotify_iter_info; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, + const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, + struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[5]; + struct fsnotify_group *current_group; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_event { + struct list_head list; +}; + +struct audit_chunk; + +struct audit_tree { + refcount_t count; + int goner; + struct audit_chunk *root; + struct list_head chunks; + struct list_head rules; + struct list_head list; + struct list_head same_root; + struct callback_head head; + char pathname[0]; +}; + +struct audit_node { + struct list_head list; + struct 
audit_tree *owner; + unsigned int index; +}; + +struct audit_chunk { + struct list_head hash; + unsigned long key; + struct fsnotify_mark *mark; + struct list_head trees; + int count; + atomic_long_t refs; + struct callback_head head; + struct audit_node owners[0]; +}; + +struct audit_parent; + +struct audit_watch { + refcount_t count; + dev_t dev; + char *path; + unsigned long ino; + struct audit_parent *parent; + struct list_head wlist; + struct list_head rules; +}; + +struct audit_parent { + struct list_head watches; + struct fsnotify_mark mark; +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_SETELEM_RESET = 19, + AUDIT_NFT_OP_RULE_RESET = 20, + AUDIT_NFT_OP_INVALID = 21, +}; + +struct audit_nfcfgop_tab { + enum audit_nfcfgop op; + const char *s; +}; + +struct audit_aux_data { + struct audit_aux_data *next; + int type; +}; + +struct audit_tree_refs { + struct audit_tree_refs *next; + struct audit_chunk *c[31]; +}; + +struct cpu_vfs_cap_data { + __u32 magic_etc; + kuid_t rootid; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +typedef int __kernel_key_t; + +typedef __kernel_key_t key_t; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + unsigned long seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct audit_aux_data_bprm_fcaps { + struct audit_aux_data d; + struct audit_cap_data fcap; + unsigned int fcap_ver; + struct audit_cap_data old_pcap; + struct audit_cap_data new_pcap; +}; + +struct audit_aux_data_pids { + struct audit_aux_data d; + pid_t target_pid[16]; + kuid_t target_auid[16]; + kuid_t target_uid[16]; + unsigned int target_sessionid[16]; + u32 target_sid[16]; + char target_comm[256]; + int pid_count; +}; + +struct fanotify_response_info_header { + __u8 type; + __u8 pad; + __u16 len; +}; + +struct fanotify_response_info_audit_rule { + struct fanotify_response_info_header hdr; + __u32 rule_number; + __u32 subj_trust; + __u32 obj_trust; +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_ANY = -1, + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, + FSNOTIFY_OBJ_TYPE_SB = 2, + FSNOTIFY_OBJ_TYPE_COUNT = 3, + FSNOTIFY_OBJ_TYPE_DETACHED = 3, +}; + +enum { + HASH_SIZE = 128, +}; + +struct audit_tree_mark { + struct fsnotify_mark mark; + struct audit_chunk *chunk; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + 
char slot_used[0]; +}; + +struct kprobe_blacklist_entry { + struct list_head list; + unsigned long start_addr; + unsigned long end_addr; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct rethook *rh; +}; + +struct kretprobe_instance { + struct rethook_node node; + char data[0]; +}; + +typedef void (*rethook_handler_t)(struct rethook_node *, void *, unsigned long, + struct pt_regs *); + +struct action_cache { + unsigned long allow_native[8]; + unsigned long allow_compat[8]; +}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + bool wait_killable_recv; + struct action_cache cache; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct notification { + atomic_t requests; + u32 flags; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + __u32 ioctl_flags; + union { + bool setfd; + int ret; + }; + struct completion completion; + struct list_head list; +}; + +struct seccomp_knotif { + struct task_struct *task; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; +}; + +typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, + unsigned int (*)(const void *, + const struct bpf_insn *)); + +typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); + +struct sock_fprog { + unsigned short len; + struct sock_filter __attribute__((btf_type_tag("user"))) * filter; +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +struct seccomp_metadata { + __u64 filter_off; + __u64 flags; +}; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long : 64; + long : 64; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + const struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf *__attribute__((btf_type_tag("percpu"))) * buf; + int is_global; + struct list_head list; + struct dentry 
*parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + struct dentry *(*create_buf_file)(const char *, struct dentry *, umode_t, + struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + unsigned long private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +struct genl_split_ops; + +struct genl_info; + +struct genl_ops; + +struct genl_small_ops; + +struct genl_multicast_group; + +struct genl_family { + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + u8 netnsok : 1; + u8 parallel_ops : 1; + u8 n_ops; + u8 n_small_ops; + u8 n_split_ops; + u8 n_mcgrps; + u8 resv_start_op; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_split_ops *split_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; + int id; + unsigned int mcgrp_offset; +}; + +struct genl_split_ops { + union { + struct { + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, + struct genl_info *); + int (*doit)(struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, + struct genl_info *); + }; + struct { + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + }; + }; + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genlmsghdr; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + const struct genl_family *family; + const struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + struct nlattr **attrs; + possible_net_t _net; + void *user_ptr[2]; + struct netlink_ext_ack *extack; +}; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_multicast_group { + char name[16]; + u8 flags; + u8 cap_sys_admin : 1; +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; 
+ +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX = 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct tp_transition_snapshot { + unsigned long rcu; + unsigned long srcu; + bool ongoing; +}; + +enum tp_func_state { + TP_FUNC_0 = 0, + TP_FUNC_1 = 1, + TP_FUNC_2 = 2, + TP_FUNC_N = 3, +}; + +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1 = 0, + TP_TRANSITION_SYNC_N_2_1 = 1, + _NR_TP_TRANSITION_SYNC = 2, +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +struct ftrace_page; + +struct ftrace_rec_iter { + struct ftrace_page *pg; + int index; +}; + +struct ftrace_page { + struct ftrace_page *next; + struct dyn_ftrace *records; + int index; + int order; +}; + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN = 0, + FTRACE_BUG_INIT = 1, + FTRACE_BUG_NOP = 2, + FTRACE_BUG_CALL = 3, + FTRACE_BUG_UPDATE = 4, +}; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu __attribute__((btf_type_tag("percpu"))) * data; + u64 time_start; + int cpu; +}; + +struct trace_pid_list; + +struct trace_options; + +struct cond_snapshot; + +struct trace_func_repeats; + +struct trace_array { + struct list_head list; + char *name; + struct array_buffer array_buffer; + struct array_buffer max_buffer; + bool allocated_snapshot; + unsigned long max_latency; + struct dentry *d_max_latency; + struct work_struct fsnotify_work; + struct irq_work fsnotify_irqwork; + struct trace_pid_list __attribute__((btf_type_tag("rcu"))) * filtered_pids; + struct trace_pid_list __attribute__((btf_type_tag("rcu"))) * filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file __attribute__((btf_type_tag("rcu"))) * enter_syscall_files[460]; + struct trace_event_file __attribute__((btf_type_tag("rcu"))) * exit_syscall_files[460]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct eventfs_inode *event_dir; + struct trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + cpumask_var_t pipe_cpumask; + int ref; + int trace_ref; + struct ftrace_ops *ops; + struct trace_pid_list __attribute__((btf_type_tag("rcu"))) * function_pids; + struct trace_pid_list __attribute__((btf_type_tag("rcu"))) * function_no_pids; + struct list_head func_probes; + struct list_head mod_trace; + struct list_head mod_notrace; + int 
function_enabled; + int no_filter_buffering_ref; + struct list_head hist_vars; + struct cond_snapshot *cond_snapshot; + struct trace_func_repeats __attribute__((btf_type_tag("percpu"))) * last_func_repeats; + bool ring_buffer_expanded; +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + unsigned long entries; + unsigned long saved_latency; + unsigned long critical_start; + unsigned long critical_end; + unsigned long critical_sequence; + unsigned long nice; + unsigned long policy; + unsigned long rt_priority; + unsigned long skipped_entries; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + int ftrace_ignore_pid; + bool ignore_pid; +}; + +union upper_chunk; + +union lower_chunk; + +struct trace_pid_list { + raw_spinlock_t lock; + struct irq_work refill_irqwork; + union upper_chunk *upper[256]; + union upper_chunk *upper_list; + union lower_chunk *lower_list; + int free_upper_chunks; + int free_lower_chunks; +}; + +union upper_chunk { + union upper_chunk *next; + union lower_chunk *data[256]; +}; + +union lower_chunk { + union lower_chunk *next; + unsigned long data[256]; +}; + +struct filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct eventfs_inode *ei; + int ref_count; + int nr_events; +}; + +struct event_subsystem { + struct list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + void (*open)(struct trace_iterator *); + void (*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, + char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool use_max_tr; + bool noboot; +}; + +struct tracer_opt; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +struct cond_snapshot { + void *cond_data; + cond_update_fn_t update; +}; + +struct trace_func_repeats { + unsigned long ip; + unsigned long parent_ip; + unsigned long count; + u64 ts_last_call; +}; + +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); +}; + +struct tracer_stat { + const char *name; + 
void *(*stat_start)(struct tracer_stat *); + void *(*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void (*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +struct ftrace_profile_page; + +struct ftrace_profile_stat { + atomic_t disabled; + struct hlist_head *hash; + struct ftrace_profile_page *pages; + struct ftrace_profile_page *start; + struct tracer_stat stat; +}; + +struct ftrace_profile { + struct hlist_node node; + unsigned long ip; + unsigned long counter; + unsigned long long time; + unsigned long long time_squared; +}; + +struct ftrace_profile_page { + struct ftrace_profile_page *next; + unsigned long index; + struct ftrace_profile records[0]; +}; + +struct ftrace_graph_ent; + +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); + +struct ftrace_graph_ret; + +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); + +struct fgraph_ops { + trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; +}; + +struct ftrace_graph_ent { + unsigned long func; + int depth; +} __attribute__((packed)); + +struct ftrace_graph_ret { + unsigned long func; + unsigned long retval; + int depth; + unsigned int overrun; + unsigned long long calltime; + unsigned long long rettime; +}; + +enum { + FTRACE_MODIFY_ENABLE_FL = 1, + FTRACE_MODIFY_MAY_SLEEP_FL = 2, +}; + +enum { + FTRACE_UPDATE_CALLS = 1, + FTRACE_DISABLE_CALLS = 2, + FTRACE_UPDATE_TRACE_FUNC = 4, + FTRACE_START_FUNC_RET = 8, + FTRACE_STOP_FUNC_RET = 16, + FTRACE_MAY_SLEEP = 32, +}; + +enum { + FTRACE_ITER_FILTER = 1, + FTRACE_ITER_NOTRACE = 2, + FTRACE_ITER_PRINTALL = 4, + FTRACE_ITER_DO_PROBES = 8, + FTRACE_ITER_PROBE = 16, + FTRACE_ITER_MOD = 32, + FTRACE_ITER_ENABLED = 64, + FTRACE_ITER_TOUCHED = 128, + FTRACE_ITER_ADDRS = 256, +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +enum { + FTRACE_HASH_FL_MOD = 1, +}; + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, +}; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +enum graph_filter_type { + GRAPH_FILTER_NOTRACE = 0, + GRAPH_FILTER_FUNCTION = 1, +}; + +struct ftrace_func_mapper { + struct ftrace_hash hash; +}; + +struct ftrace_func_entry { + struct hlist_node hlist; + unsigned long ip; + unsigned long direct; +}; + +struct ftrace_func_map { + struct ftrace_func_entry entry; + void *data; +}; + +struct ftrace_probe_ops; + +struct ftrace_func_probe { + struct ftrace_probe_ops *probe_ops; + struct ftrace_ops ops; + struct trace_array *tr; + struct list_head list; + void *data; + int ref; +}; + +struct ftrace_probe_ops { + void (*func)(unsigned long, unsigned long, struct trace_array *, + struct ftrace_probe_ops *, void *); + int (*init)(struct ftrace_probe_ops *, struct trace_array *, unsigned long, + void *, void **); + void (*free)(struct ftrace_probe_ops *, struct trace_array *, unsigned long, void *); + int (*print)(struct seq_file *, unsigned long, struct ftrace_probe_ops *, void *); +}; + +struct ftrace_mod_map { + struct callback_head rcu; + struct list_head list; + struct module *mod; + unsigned long start_addr; + unsigned long end_addr; + struct list_head funcs; + unsigned int num_funcs; +}; + +struct ftrace_mod_func { + struct list_head list; + char *name; + unsigned long ip; + unsigned int size; +}; + +struct ftrace_init_func { + struct list_head list; + unsigned long ip; +}; + +struct ftrace_mod_load { + struct list_head list; + char *func; + char *module; + int enable; +}; 
+ +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +struct ftrace_iterator { + loff_t pos; + loff_t func_pos; + loff_t mod_pos; + struct ftrace_page *pg; + struct dyn_ftrace *func; + struct ftrace_func_probe *probe; + struct ftrace_func_entry *probe_entry; + struct trace_parser parser; + struct ftrace_hash *hash; + struct ftrace_ops *ops; + struct trace_array *tr; + struct list_head *mod_list; + int pidx; + int idx; + unsigned int flags; +}; + +struct ftrace_glob { + char *search; + unsigned int len; + int type; +}; + +struct ftrace_graph_data { + struct ftrace_hash *hash; + struct ftrace_func_entry *entry; + int idx; + enum graph_filter_type type; + struct ftrace_hash *new_hash; + const struct seq_operations *seq_ops; + struct trace_parser parser; +}; + +typedef int (*ftrace_mapper_func)(void *); + +struct kallsyms_data { + unsigned long *addrs; + const char **syms; + size_t cnt; + size_t found; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND = 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +struct buffer_data_page; + +struct buffer_page { + struct list_head list; + local_t write; + unsigned int read; + local_t entries; + unsigned long real_end; + struct buffer_data_page *page; +}; + +struct buffer_data_page { + u64 time_stamp; + local_t commit; + unsigned char data[0]; +}; + +struct ring_buffer_per_cpu; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long head; + unsigned long next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + unsigned long cache_read; + unsigned long cache_pages_removed; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + int missed_events; +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + long wait_index; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + unsigned long nr_pages; + unsigned int current_context; + struct list_head *pages; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + unsigned long lost_events; + unsigned long last_overrun; + unsigned long nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_lost; + local_t pages_read; + long last_pages_touch; + size_t shortest_full; + unsigned long read; + unsigned long read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 event_stamp[5]; + u64 read_stamp; + unsigned long pages_removed; + long nr_pages_to_update; + struct list_head new_pages; + struct 
work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; +}; + +struct trace_buffer { + unsigned int flags; + int cpus; + atomic_t record_disabled; + atomic_t resizing; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + unsigned long length; + struct buffer_page *tail_page; + int add_timestamp; +}; + +struct trace_export { + struct trace_export __attribute__((btf_type_tag("rcu"))) * next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char *saved_cmdlines; +}; + +struct ftrace_stack { + unsigned long calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_FIELDS = 256, + TRACE_ITER_PRINTK = 512, + TRACE_ITER_ANNOTATE = 1024, + TRACE_ITER_USERSTACKTRACE = 2048, + TRACE_ITER_SYM_USEROBJ = 4096, + TRACE_ITER_PRINTK_MSGONLY = 8192, + TRACE_ITER_CONTEXT_INFO = 16384, + TRACE_ITER_LATENCY_FMT = 32768, + TRACE_ITER_RECORD_CMD = 65536, + TRACE_ITER_RECORD_TGID = 131072, + TRACE_ITER_OVERWRITE = 262144, + TRACE_ITER_STOP_ON_FREE = 524288, + TRACE_ITER_IRQ_INFO = 1048576, + TRACE_ITER_MARKERS = 2097152, + TRACE_ITER_EVENT_FORK = 4194304, + TRACE_ITER_PAUSE_ON_TRACE = 8388608, + TRACE_ITER_HASH_PTR = 16777216, + TRACE_ITER_FUNCTION = 33554432, + TRACE_ITER_FUNC_FORK = 67108864, + TRACE_ITER_DISPLAY_GRAPH = 134217728, + TRACE_ITER_STACKTRACE = 268435456, +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_USER_STACK = 12, + TRACE_BLK = 13, + TRACE_BPUTS = 14, + TRACE_HWLAT = 15, + TRACE_OSNOISE = 16, + TRACE_TIMERLAT = 17, + TRACE_RAW_DATA = 18, + TRACE_FUNC_REPEATS = 19, + __TRACE_LAST_TYPE = 20, +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_IRQS_NOSUPPORT = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, + TRACE_FLAG_BH_OFF = 128, +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, + ETT_EVENT_EPROBE = 64, +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + 
EVENT_FILE_FL_WAS_ENABLED_BIT = 10, + EVENT_FILE_FL_FREED_BIT = 11, +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, + FSNOTIFY_EVENT_DENTRY = 3, + FSNOTIFY_EVENT_ERROR = 4, +}; + +struct err_info { + const char **errs; + u8 type; + u16 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char *cmd; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + unsigned long caller[8]; +}; + +struct func_repeats_entry { + struct trace_entry ent; + unsigned long ip; + unsigned long parent_ip; + u16 count; + u16 top_delta_ts; + u32 bottom_delta_ts; +}; + +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); + +struct print_entry { + struct trace_entry ent; + unsigned long ip; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + unsigned long ip; + const char *str; +}; + +struct ftrace_entry { + struct trace_entry ent; + unsigned long ip; + unsigned long parent_ip; +}; + +struct stack_entry { + struct trace_entry ent; + int size; + unsigned long caller[0]; +}; + +struct bprint_entry { + struct trace_entry ent; + unsigned long ip; + const char *fmt; + u32 buf[0]; +}; + +struct trace_min_max_param { + struct mutex *lock; + u64 *val; + u64 *min; + u64 *max; +}; + +struct raw_data_entry { + struct trace_entry ent; + unsigned int id; + char buf[0]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int read; +}; + +struct trace_mark { + unsigned long long val; + char sym; +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_RDYN_STRING = 3, + FILTER_PTR_STRING = 4, + FILTER_TRACE_FN = 5, + FILTER_CPUMASK = 6, + FILTER_COMM = 7, + FILTER_CPU = 8, + FILTER_STACKTRACE = 9, +}; + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; + int len; +}; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned char next_state; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; +}; + +struct osnoise_entry { + struct trace_entry ent; + u64 noise; + u64 runtime; + u64 max_sample; + unsigned int hw_count; + unsigned int nmi_count; + unsigned int irq_count; + unsigned int softirq_count; + unsigned int thread_count; +}; + +struct timerlat_entry { + struct trace_entry ent; + unsigned int seqnum; + int context; + u64 timer_latency; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct stat_node { + struct rb_node node; + void *stat; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +enum { + TRACE_FUNC_NO_OPTS = 0, + TRACE_FUNC_OPT_STACK = 1, + TRACE_FUNC_OPT_NO_REPEATS = 2, + TRACE_FUNC_OPT_HIGHEST_BIT = 4, +}; + +enum { + TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +enum { + FLAGS_FILL_FULL = 268435456, + FLAGS_FILL_START = 
536870912, + FLAGS_FILL_END = 805306368, +}; + +struct fgraph_cpu_data { + pid_t last_pid; + int depth; + int depth_irq; + int ignore; + unsigned long enter_funcs[50]; +}; + +struct ftrace_graph_ent_entry { + struct trace_entry ent; + struct ftrace_graph_ent graph_ent; +}; + +struct ftrace_graph_ret_entry { + struct trace_entry ent; + struct ftrace_graph_ret ret; +}; + +struct fgraph_data { + struct fgraph_cpu_data __attribute__((btf_type_tag("percpu"))) * cpu_data; + struct ftrace_graph_ent_entry ent; + struct ftrace_graph_ret_entry ret; + int failed; + int cpu; + long : 0; +} __attribute__((packed)); + +enum rq_end_io_ret { + RQ_END_IO_NONE = 0, + RQ_END_IO_FREE = 1, +}; + +typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); + +typedef __u32 req_flags_t; + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int timeout; + unsigned int __data_len; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + union { + struct list_head queuelist; + struct request *rq_next; + }; + struct block_device *part; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + unsigned short wbt_flags; + unsigned short stats_sectors; + unsigned short nr_phys_segments; + unsigned short nr_integrity_segments; + unsigned short ioprio; + enum mq_rq_state state; + atomic_t ref; + unsigned long deadline; + union { + struct hlist_node hash; + struct llist_node ipi_list; + }; + union { + struct rb_node rb_node; + struct bio_vec special_vec; + }; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + rq_end_io_fn *saved_end_io; + } flush; + u64 fifo_time; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +struct sbitmap_word; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + bool round_robin; + struct sbitmap_word *map; + unsigned int __attribute__((btf_type_tag("percpu"))) * alloc_hint; +}; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + unsigned long state; + long : 64; + long : 64; + long : 64; + long : 64; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + unsigned long flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + unsigned short type; + unsigned short nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + unsigned long run; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct list_head hctx_list; + long : 64; + long : 64; + long : 64; +}; + +struct blk_flush_queue { + spinlock_t mq_flush_lock; + unsigned int flush_pending_idx : 1; + unsigned int flush_running_idx : 1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + unsigned long flush_data_in_flight; + struct request *flush_rq; +}; + +struct sbitmap_word { + unsigned long word; + long : 64; + long : 64; + long : 
64; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned long cleared; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct sbq_wait_state; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + unsigned int min_shallow_depth; + atomic_t completion_cnt; + atomic_t wakeup_cnt; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int active_queues; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + spinlock_t lock; +}; + +struct sbq_wait_state { + wait_queue_head_t wait; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct blk_mq_tag_set { + const struct blk_mq_ops *ops; + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + struct blk_mq_tags **tags; + struct blk_mq_tags *shared_tags; + struct mutex tag_list_lock; + struct list_head tag_list; + struct srcu_struct *srcu; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + __BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +typedef u64 compat_u64; + +struct compat_blk_user_trace_setup { + char name[32]; + u16 act_mask; + int : 0; + u32 buf_size; + u32 buf_nr; + compat_u64 start_lba; + compat_u64 end_lba; + u32 pid; +} __attribute__((packed)); + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +struct fgraph_ret_regs { + unsigned long ax; + unsigned long dx; + unsigned long bp; +}; + +struct boot_triggers { + const char *event; + char *trigger; +}; + +typedef int 
(*eventfs_callback)(const char *, umode_t *, void **, + const struct file_operations **); + +struct eventfs_entry { + const char *name; + eventfs_callback callback; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct module_string { + struct list_head next; + struct module *module; + char *str; +}; + +struct event_probe_data { + struct trace_event_file *file; + unsigned long count; + int ref; + bool enable; +}; + +enum filter_pred_fn { + FILTER_PRED_FN_NOP = 0, + FILTER_PRED_FN_64 = 1, + FILTER_PRED_FN_64_CPUMASK = 2, + FILTER_PRED_FN_S64 = 3, + FILTER_PRED_FN_U64 = 4, + FILTER_PRED_FN_32 = 5, + FILTER_PRED_FN_32_CPUMASK = 6, + FILTER_PRED_FN_S32 = 7, + FILTER_PRED_FN_U32 = 8, + FILTER_PRED_FN_16 = 9, + FILTER_PRED_FN_16_CPUMASK = 10, + FILTER_PRED_FN_S16 = 11, + FILTER_PRED_FN_U16 = 12, + FILTER_PRED_FN_8 = 13, + FILTER_PRED_FN_8_CPUMASK = 14, + FILTER_PRED_FN_S8 = 15, + FILTER_PRED_FN_U8 = 16, + FILTER_PRED_FN_COMM = 17, + FILTER_PRED_FN_STRING = 18, + FILTER_PRED_FN_STRLOC = 19, + FILTER_PRED_FN_STRRELLOC = 20, + FILTER_PRED_FN_PCHAR_USER = 21, + FILTER_PRED_FN_PCHAR = 22, + FILTER_PRED_FN_CPU = 23, + FILTER_PRED_FN_CPU_CPUMASK = 24, + FILTER_PRED_FN_CPUMASK = 25, + FILTER_PRED_FN_CPUMASK_CPU = 26, + FILTER_PRED_FN_FUNCTION = 27, + FILTER_PRED_FN_ = 28, + FILTER_PRED_TEST_VISITED = 29, +}; + +struct regex; + +struct filter_pred { + struct regex *regex; + struct cpumask *mask; + unsigned short *ops; + struct ftrace_event_field *field; + u64 val; + u64 val2; + enum filter_pred_fn fn_num; + int offset; + int not ; + int op; +}; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + unsigned long args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long ret; +}; + +struct syscall_tp_t { + struct trace_entry ent; + int syscall_nr; + unsigned long args[6]; +}; + +struct syscall_tp_t___2 { + struct trace_entry ent; + int syscall_nr; + unsigned long ret; +}; + +typedef unsigned long perf_trace_t[1024]; + +struct ustring_buffer { + char buffer[1024]; +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE = 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum { + TOO_MANY_CLOSE = -1, + TOO_MANY_OPEN = -2, + MISSING_QUOTE = -3, +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_MISSING_BRACE_OPEN = 5, + FILT_ERR_MISSING_BRACE_CLOSE = 6, + FILT_ERR_OPERAND_TOO_LONG = 7, + FILT_ERR_EXPECT_STRING = 8, + FILT_ERR_EXPECT_DIGIT = 9, + FILT_ERR_ILLEGAL_FIELD_OP = 10, + FILT_ERR_FIELD_NOT_FOUND = 11, + FILT_ERR_ILLEGAL_INTVAL = 12, + FILT_ERR_BAD_SUBSYS_FILTER = 13, + FILT_ERR_TOO_MANY_PREDS = 14, + FILT_ERR_INVALID_FILTER = 15, + FILT_ERR_INVALID_CPULIST = 16, + FILT_ERR_IP_FIELD_ONLY = 17, + FILT_ERR_INVALID_VALUE = 18, + FILT_ERR_NO_FUNCTION = 19, + FILT_ERR_ERRNO = 20, + FILT_ERR_NO_FILTER = 21, +}; + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +struct function_filter_data { + struct ftrace_ops *ops; + int first_filter; + int first_notrace; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct 
filter_parse_error *, + struct filter_pred **); + +struct event_trigger_data; + +struct event_trigger_ops; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*parse)(struct event_command *, struct trace_event_file *, char *, char *, char *); + int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); + struct event_trigger_ops *(*get_trigger_ops)(char *, char *); +}; + +struct event_trigger_data { + unsigned long count; + int ref; + int flags; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter __attribute__((btf_type_tag("rcu"))) * filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, + struct ring_buffer_event *); + int (*init)(struct event_trigger_data *); + void (*free)(struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_data *); +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +enum { + EVENT_TRIGGER_FL_PROBE = 1, +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +struct dyn_event; + +struct dyn_event_operations { + struct list_head list; + int (*create)(const char *); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_DEREF = 10, + FETCH_OP_UDEREF = 11, + FETCH_OP_ST_RAW = 12, + FETCH_OP_ST_MEM = 13, + FETCH_OP_ST_UMEM = 14, + FETCH_OP_ST_STRING = 15, + FETCH_OP_ST_USTRING = 16, + FETCH_OP_ST_SYMSTR = 17, + FETCH_OP_MOD_BF = 18, + FETCH_OP_LP_ARRAY = 19, + FETCH_OP_TP_ARG = 20, + FETCH_OP_END = 21, + FETCH_NOP_SYMBOL = 22, +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_BAD_MAXACT_TYPE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_NON_UNIQ_SYMBOL = 10, + TP_ERR_BAD_RETPROBE = 11, + TP_ERR_NO_TRACEPOINT = 12, + TP_ERR_BAD_ADDR_SUFFIX = 13, + TP_ERR_NO_GROUP_NAME = 14, + TP_ERR_GROUP_TOO_LONG = 15, + TP_ERR_BAD_GROUP_NAME = 16, + TP_ERR_NO_EVENT_NAME = 17, + TP_ERR_EVENT_TOO_LONG = 18, + TP_ERR_BAD_EVENT_NAME = 19, + TP_ERR_EVENT_EXIST = 20, + TP_ERR_RETVAL_ON_PROBE = 21, + TP_ERR_NO_RETVAL = 22, + TP_ERR_BAD_STACK_NUM = 23, + TP_ERR_BAD_ARG_NUM = 24, + TP_ERR_BAD_VAR = 25, + TP_ERR_BAD_REG_NAME = 26, + TP_ERR_BAD_MEM_ADDR = 27, + TP_ERR_BAD_IMM = 28, + TP_ERR_IMMSTR_NO_CLOSE = 29, + TP_ERR_FILE_ON_KPROBE = 30, + TP_ERR_BAD_FILE_OFFS = 31, + TP_ERR_SYM_ON_UPROBE = 32, + 
TP_ERR_TOO_MANY_OPS = 33, + TP_ERR_DEREF_NEED_BRACE = 34, + TP_ERR_BAD_DEREF_OFFS = 35, + TP_ERR_DEREF_OPEN_BRACE = 36, + TP_ERR_COMM_CANT_DEREF = 37, + TP_ERR_BAD_FETCH_ARG = 38, + TP_ERR_ARRAY_NO_CLOSE = 39, + TP_ERR_BAD_ARRAY_SUFFIX = 40, + TP_ERR_BAD_ARRAY_NUM = 41, + TP_ERR_ARRAY_TOO_BIG = 42, + TP_ERR_BAD_TYPE = 43, + TP_ERR_BAD_STRING = 44, + TP_ERR_BAD_SYMSTRING = 45, + TP_ERR_BAD_BITFIELD = 46, + TP_ERR_ARG_NAME_TOO_LONG = 47, + TP_ERR_NO_ARG_NAME = 48, + TP_ERR_BAD_ARG_NAME = 49, + TP_ERR_USED_ARG_NAME = 50, + TP_ERR_ARG_TOO_LONG = 51, + TP_ERR_NO_ARG_BODY = 52, + TP_ERR_BAD_INSN_BNDRY = 53, + TP_ERR_FAIL_REG_PROBE = 54, + TP_ERR_DIFF_PROBE_TYPE = 55, + TP_ERR_DIFF_ARG_TYPE = 56, + TP_ERR_SAME_PROBE = 57, + TP_ERR_NO_EVENT_INFO = 58, + TP_ERR_BAD_ATTACH_EVENT = 59, + TP_ERR_BAD_ATTACH_ARG = 60, + TP_ERR_NO_EP_FILTER = 61, + TP_ERR_NOSUP_BTFARG = 62, + TP_ERR_NO_BTFARG = 63, + TP_ERR_NO_BTF_ENTRY = 64, + TP_ERR_BAD_VAR_ARGS = 65, + TP_ERR_NOFENTRY_ARGS = 66, + TP_ERR_DOUBLE_ARGS = 67, + TP_ERR_ARGS_2LONG = 68, + TP_ERR_ARGIDX_2BIG = 69, + TP_ERR_NO_PTR_STRCT = 70, + TP_ERR_NOSUP_DAT_ARG = 71, + TP_ERR_BAD_HYPHEN = 72, + TP_ERR_NO_BTF_FIELD = 73, + TP_ERR_BAD_BTF_TID = 74, + TP_ERR_BAD_TYPE4STR = 75, +}; + +enum probe_print_type { + PROBE_PRINT_NORMAL = 0, + PROBE_PRINT_RETURN = 1, + PROBE_PRINT_EVENT = 2, +}; + +struct eprobe_trace_entry_head { + struct trace_entry ent; +}; + +struct fetch_insn; + +struct fetch_type; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const struct fetch_type *type; +}; + +struct trace_probe_event; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_arg args[0]; +}; + +struct trace_eprobe { + const char *event_system; + const char *event_name; + char *filter_str; + struct trace_event_call *event; + struct dyn_event devent; + struct trace_probe tp; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + unsigned long immediate; + void *data; + }; +}; + +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); + +struct fetch_type { + const char *name; + size_t size; + bool is_signed; + bool is_string; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct btf_param; + +struct traceprobe_parse_context { + struct trace_event_call *event; + const char *funcname; + const struct btf_type *proto; + const struct btf_param *params; + s32 nr_params; + struct btf *btf; + const struct btf_type *last_type; + u32 last_bitoffs; + u32 last_bitsize; + unsigned int flags; + int offset; +}; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +struct eprobe_data { + struct trace_event_file *file; + struct trace_eprobe *ep; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +struct bpf_mem_caches; + +struct bpf_mem_cache; + +struct bpf_mem_alloc { + struct bpf_mem_caches 
__attribute__((btf_type_tag("percpu"))) * caches; + struct bpf_mem_cache __attribute__((btf_type_tag("percpu"))) * cache; + bool percpu; + struct work_struct work; +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + struct bpf_mem_alloc selem_ma; + struct bpf_mem_alloc storage_ma; + bool bpf_ma; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_mem_cache { + struct llist_head free_llist; + local_t active; + struct llist_head free_llist_extra; + struct irq_work refill_work; + struct obj_cgroup *objcg; + int unit_size; + int free_cnt; + int low_watermark; + int high_watermark; + int batch; + int percpu_size; + bool draining; + struct bpf_mem_cache *tgt; + struct llist_head free_by_rcu; + struct llist_node *free_by_rcu_tail; + struct llist_head waiting_for_gp; + struct llist_node *waiting_for_gp_tail; + struct callback_head rcu; + atomic_t call_rcu_in_progress; + struct llist_head free_llist_extra_rcu; + struct llist_head free_by_rcu_ttrace; + struct llist_head waiting_for_gp_ttrace; + struct callback_head rcu_ttrace; + atomic_t call_rcu_ttrace_in_progress; +}; + +struct bpf_mem_caches { + struct bpf_mem_cache cache[11]; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map __attribute__((btf_type_tag("rcu"))) * smap; + u8 data[0]; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +struct bpf_idmap { + u32 tmp_id_gen; + struct bpf_id_pair map[600]; +}; + +struct bpf_idset { + u32 count; + u32 ids[600]; +}; + +struct bpf_verifier_log { + u64 start_pos; + u64 end_pos; + char __attribute__((btf_type_tag("user"))) * ubuf; + u32 level; + u32 len_total; + u32 len_max; + char kbuf[1024]; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + bool has_tail_call; + bool tail_call_reachable; + bool has_ld_abs; + bool is_cb; + bool is_async_cb; + bool is_exception_cb; +}; + +struct backtrack_state { + struct bpf_verifier_env *env; + u32 frame; + u32 reg_masks[8]; + u64 stack_masks[8]; +}; + +typedef sockptr_t bpfptr_t; + +struct bpf_verifier_ops; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + struct btf_mod_pair used_btfs[64]; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 id_gen; + u32 hidden_subprog_cnt; + int exception_callback_subprog; + bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + bool seen_exception; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[258]; + union { + struct bpf_idmap idmap_scratch; + struct bpf_idset idset_scratch; + }; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + struct backtrack_state bt; + u32 
pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; + bpfptr_t fd_array; + u32 scratched_regs; + u64 scratched_stack_slots; + u64 prev_log_pos; + u64 prev_insn_print_pos; + char tmp_str_buf[320]; +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_insn_access_aux; + +struct bpf_reg_state; + +struct bpf_verifier_ops { + const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id, + const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, + struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, + struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, + int, int); +}; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + union { + int ctx_field_size; + struct { + struct btf *btf; + u32 btf_id; + }; + }; + struct bpf_verifier_log *log; +}; + +enum bpf_dynptr_type { + BPF_DYNPTR_TYPE_INVALID = 0, + BPF_DYNPTR_TYPE_LOCAL = 1, + BPF_DYNPTR_TYPE_RINGBUF = 2, + BPF_DYNPTR_TYPE_SKB = 3, + BPF_DYNPTR_TYPE_XDP = 4, +}; + +enum bpf_iter_state { + BPF_ITER_STATE_INVALID = 0, + BPF_ITER_STATE_ACTIVE = 1, + BPF_ITER_STATE_DRAINED = 2, +}; + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + s32 off; + union { + int range; + struct { + struct bpf_map *map_ptr; + u32 map_uid; + }; + struct { + struct btf *btf; + u32 btf_id; + }; + struct { + u32 mem_size; + u32 dynptr_id; + }; + struct { + enum bpf_dynptr_type type; + bool first_slot; + } dynptr; + struct { + struct btf *btf; + u32 btf_id; + enum bpf_iter_state state : 2; + int depth : 30; + } iter; + struct { + unsigned long raw1; + unsigned long raw2; + } raw; + u32 subprogno; + }; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + u32 id; + u32 ref_obj_id; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; +}; + +struct bpf_active_lock { + void *ptr; + u32 id; +}; + +struct bpf_idx_pair; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + struct bpf_active_lock active_lock; + bool speculative; + bool active_rcu_lock; + bool used_as_loop_entry; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_verifier_state *loop_entry; + struct bpf_idx_pair *jmp_history; + u32 jmp_history_cnt; + u32 dfs_depth; + u32 callback_unroll_depth; +}; + +struct bpf_reference_state; + +struct bpf_stack_state; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + u32 async_entry_cnt; + bool in_callback_fn; + struct tnum callback_ret_range; + bool in_async_callback_fn; + bool in_exception_callback_fn; + u32 callback_depth; + int acquired_refs; + struct bpf_reference_state *refs; + int 
allocated_stack; + struct bpf_stack_state *stack; +}; + +struct bpf_reference_state { + int id; + int insn_idx; + int callback_ref; +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +struct bpf_idx_pair { + u32 prev_idx; + u32 idx; +}; + +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_loop_inline_state { + unsigned int initialized : 1; + unsigned int fit_for_inline : 1; + u32 callback_subprogno; +}; + +struct btf_struct_meta; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + unsigned long map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + }; + } btf_var; + struct bpf_loop_inline_state loop_inline_state; + }; + union { + u64 obj_new_size; + u64 insert_off; + }; + struct btf_struct_meta *kptr_struct_meta; + u64 map_key_state; + int ctx_field_size; + u32 seen; + bool sanitize_stack_spill; + bool zext_dst; + bool storage_get_func_atomic; + bool is_iter_next; + bool call_with_percpu_alloc_ptr; + u8 alu_state; + unsigned int orig_idx; + bool jmp_point; + bool prune_point; + bool force_checkpoint; + bool calls_callback; +}; + +struct btf_struct_meta { + u32 btf_id; + struct btf_record *record; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; +}; + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +enum key_lookup_flag { + KEY_LOOKUP_CREATE = 1, + KEY_LOOKUP_PARTIAL = 2, + KEY_LOOKUP_ALL = 3, +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +enum { + BPF_F_KPROBE_MULTI_RETURN = 1, +}; + +enum uprobe_filter_ctx { + UPROBE_FILTER_REGISTER = 0, + UPROBE_FILTER_UNREGISTER = 1, + UPROBE_FILTER_MMAP = 2, +}; + +enum { + BPF_F_UPROBE_MULTI_RETURN = 1, +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +enum { + BPF_F_INDEX_MASK = 4294967295ULL, + BPF_F_CURRENT_CPU = 4294967295ULL, + BPF_F_CTXLEN_MASK = 4503595332403200ULL, +}; + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = 1, +}; + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, + const void __attribute__((btf_type_tag("user"))) *); + +typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, + const void + __attribute__((btf_type_tag("user"))) *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void 
*, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void __attribute__((btf_type_tag("user"))) *, + const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +struct btf_ptr; + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +struct bpf_perf_event_value; + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, + struct bpf_perf_event_value *, u32); + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_get_current_task_btf)(); + +typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); + +struct bpf_perf_event_data_kern; + +typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); + +typedef struct pt_regs bpf_user_pt_regs_t; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); + +typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); + +typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); + +typedef u64 (*btf_get_func_ret)(void *, u64 *); + +typedef u64 (*btf_get_func_arg_cnt)(void *); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, + struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tracepoint_args; + +typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, + struct bpf_map *, u64, void *, u64); + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, + struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + 
+struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + bool is_uprobe; +}; + +struct trace_uprobe; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + unsigned long bp_addr; +}; + +struct bpf_kprobe_multi_link; + +struct bpf_kprobe_multi_run_ctx { + struct bpf_run_ctx run_ctx; + struct bpf_kprobe_multi_link *link; + unsigned long entry_ip; +}; + +struct fprobe { + struct ftrace_ops ops; + unsigned long nmissed; + unsigned int flags; + struct rethook *rethook; + size_t entry_data_size; + int nr_maxactive; + int (*entry_handler)(struct fprobe *, unsigned long, unsigned long, + struct pt_regs *, void *); + void (*exit_handler)(struct fprobe *, unsigned long, unsigned long, + struct pt_regs *, void *); +}; + +struct bpf_kprobe_multi_link { + struct bpf_link link; + struct fprobe fp; + unsigned long *addrs; + u64 *cookies; + u32 cnt; + u32 mods_cnt; + struct module **mods; + u32 flags; +}; + +struct bpf_uprobe; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_run_ctx run_ctx; + unsigned long entry_ip; + struct bpf_uprobe *uprobe; +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *); + int (*ret_handler)(struct uprobe_consumer *, unsigned long, struct pt_regs *); + bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); + struct uprobe_consumer *next; +}; + +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + loff_t offset; + u64 cookie; + struct uprobe_consumer consumer; +}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + struct bpf_uprobe *uprobes; + struct task_struct *task; +}; + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; +}; + +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); + +struct bpf_bprintf_data { + u32 *bin_args; + char *buf; + bool get_bin_args; + bool get_buf; +}; + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +struct user_syms { + const char **syms; + char *buf; +}; + +typedef int (*cmp_r_func_t)(const void *, const void *, const void *); + +typedef void (*swap_r_func_t)(void *, void *, int, const void *); + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct modules_array { + struct module **mods; + int mods_cnt; + int mods_cap; +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +struct bpf_key { + struct key *key; + bool has_ref; +}; + +typedef unsigned long (*bpf_ctx_copy_t)(void *, const void *, unsigned long, unsigned long); + +struct bpf_dynptr_kern { + void *data; + u32 size; + u32 offset; +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +struct multi_symbols_sort { + const char **funcs; + u64 *cookies; +}; + +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + unsigned long *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + unsigned long vaddr; +}; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct 
list_head pending_list; + struct uprobe_consumer *consumers; + struct inode *inode; + loff_t offset; + loff_t ref_ctr_offset; + unsigned long flags; + struct arch_uprobe arch; +}; + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + unsigned long __attribute__((btf_type_tag("percpu"))) * nhit; + const char *symbol; + struct trace_probe tp; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + unsigned long func; + unsigned long ret_ip; +}; + +struct kprobe_trace_entry_head { + struct trace_entry ent; + unsigned long ip; +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +struct dynevent_arg { + const char *str; + char separator; +}; + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct sym_count_ctx { + unsigned int count; + const char *name; +}; + +typedef void (*btf_trace_error_report_end)(void *, enum error_detector, unsigned long); + +struct trace_event_raw_error_report_template { + struct trace_entry ent; + enum error_detector error_detector; + unsigned long id; + char __data[0]; +}; + +struct trace_event_data_offsets_error_report_template {}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, + enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, + enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, + enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned 
int, unsigned int); + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_idle_miss { + struct trace_entry ent; + u32 cpu_id; + u32 state; + bool below; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + char __data[0]; +}; + +struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_raw_guest_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; +}; + +struct trace_event_data_offsets_wakeup_source { + u32 name; +}; + +struct trace_event_data_offsets_clock { + u32 name; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; +}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_cpu_idle_miss {}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + u32 driver; + u32 parent; + u32 pm_ops; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + u32 device; + u32 driver; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + +struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_guest_halt_poll_ns {}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void 
(*btf_trace_rpm_return_int)(void *, struct device *, unsigned long, int); + +struct trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + unsigned long ip; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; +}; + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +enum { + BTF_KIND_UNKN = 0, + BTF_KIND_INT = 1, + BTF_KIND_PTR = 2, + BTF_KIND_ARRAY = 3, + BTF_KIND_STRUCT = 4, + BTF_KIND_UNION = 5, + BTF_KIND_ENUM = 6, + BTF_KIND_FWD = 7, + BTF_KIND_TYPEDEF = 8, + BTF_KIND_VOLATILE = 9, + BTF_KIND_CONST = 10, + BTF_KIND_RESTRICT = 11, + BTF_KIND_FUNC = 12, + BTF_KIND_FUNC_PROTO = 13, + BTF_KIND_VAR = 14, + BTF_KIND_DATASEC = 15, + BTF_KIND_FLOAT = 16, + BTF_KIND_DECL_TAG = 17, + BTF_KIND_TYPE_TAG = 18, + BTF_KIND_ENUM64 = 19, + NR_BTF_KINDS = 20, + BTF_KIND_MAX = 19, +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +struct btf_anon_stack { + u32 tid; + u32 offset; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; +}; + +struct trace_uprobe { + struct dyn_event devent; + struct uprobe_consumer consumer; + struct path path; + struct inode *inode; + char *filename; + unsigned long offset; + unsigned long ref_ctr_offset; + unsigned long nhit; + struct trace_probe tp; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + unsigned long vaddr[0]; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, + struct mm_struct *); + +struct fprobe_rethook_node { + struct rethook_node node; + unsigned long entry_ip; + unsigned long entry_parent_ip; + char data[0]; +}; + +typedef int (*objpool_init_obj_cb)(void *, void *); + +struct trace_fprobe { + struct dyn_event devent; + struct fprobe fp; + const char *symbol; + struct tracepoint *tpoint; + struct module *mod; + struct trace_probe tp; +}; + +struct fexit_trace_entry_head { + struct trace_entry ent; + unsigned long func; + unsigned long ret_ip; +}; + +struct fentry_trace_entry_head { + struct trace_entry ent; + unsigned long ip; +}; + +struct __find_tracepoint_cb_data { + const char *tp_name; + struct tracepoint *tpoint; +}; + +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; +}; + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize; + struct xdp_mem_info mem; + struct net_device *dev_rx; + u32 frame_sz; + u32 flags; +}; + +struct xdp_rxq_info; + +struct xdp_txq_info; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; + u32 flags; +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + unsigned int napi_id; + u32 frag_size; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct xdp_md { + __u32 
data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, + const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, + const struct bpf_prog *, const void *, int, + enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, + const struct bpf_prog *, const void *, int, + enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, + const struct bpf_prog *, const void *, int, + enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, + const struct bpf_prog *, const void *, int, + enum bpf_map_type, u32, u32); + +struct xdp_cpumap_stats; + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, + struct xdp_cpumap_stats *); + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, + const struct net_device *, int, int, int); + +struct xdp_mem_allocator; + +typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + }; + struct rhash_head node; + struct callback_head rcu; +}; + +typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, + const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, + const struct page *); + +typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS = 1, + CGROUP_INET_SOCK_CREATE = 2, + CGROUP_SOCK_OPS = 3, + CGROUP_DEVICE = 4, + CGROUP_INET4_BIND = 5, + CGROUP_INET6_BIND = 6, + CGROUP_INET4_CONNECT = 7, + CGROUP_INET6_CONNECT = 8, + CGROUP_UNIX_CONNECT = 9, + CGROUP_INET4_POST_BIND = 10, + CGROUP_INET6_POST_BIND = 11, + CGROUP_UDP4_SENDMSG = 12, + CGROUP_UDP6_SENDMSG = 13, + CGROUP_UNIX_SENDMSG = 14, + CGROUP_SYSCTL = 15, + CGROUP_UDP4_RECVMSG = 16, + CGROUP_UDP6_RECVMSG = 17, + CGROUP_UNIX_RECVMSG = 18, + CGROUP_GETSOCKOPT = 19, + CGROUP_SETSOCKOPT = 20, + CGROUP_INET4_GETPEERNAME = 21, + CGROUP_INET6_GETPEERNAME = 22, + CGROUP_UNIX_GETPEERNAME = 23, + CGROUP_INET4_GETSOCKNAME = 24, + CGROUP_INET6_GETSOCKNAME = 25, + CGROUP_UNIX_GETSOCKNAME = 26, + CGROUP_INET_SOCK_RELEASE = 27, + CGROUP_LSM_START = 28, + CGROUP_LSM_END = 27, + MAX_CGROUP_BPF_ATTACH_TYPE = 28, +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +struct bpf_prog_pack { + struct list_head list; + void *ptr; + unsigned long bitmap[0]; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + 
int drops; + int sent; + int err; + char __data[0]; +}; + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_raw_bpf_xdp_link_attach_failed { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_xdp_link_attach_failed { + u32 msg; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +struct bpf_mprog_cp { + struct bpf_link *link; +}; + +struct bpf_mprog_bundle { + struct bpf_mprog_entry a; + struct bpf_mprog_entry b; + struct bpf_mprog_cp cp_items[64]; + struct bpf_prog *ref; + atomic64_t revision; + u32 count; +}; + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, + BPF_F_LINK = 8192, + BPF_F_PATH_FD = 16384, +}; + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + 
BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, +}; + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +enum bpf_audit { + BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +enum bpf_perf_event_type { + BPF_PERF_EVENT_UNSPEC = 0, + BPF_PERF_EVENT_UPROBE = 1, + BPF_PERF_EVENT_URETPROBE = 2, + BPF_PERF_EVENT_KPROBE = 3, + BPF_PERF_EVENT_KRETPROBE = 4, + BPF_PERF_EVENT_TRACEPOINT = 5, + BPF_PERF_EVENT_EVENT = 6, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); + +typedef u64 (*btf_bpf_sys_close)(u32); + +typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); + +struct bpf_tracing_link { + struct bpf_tramp_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; +}; + +struct bpf_perf_link { + struct bpf_link link; + struct file *perf_file; +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible : 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; + __u32 verified_insns; + __u32 attach_btf_obj_id; + __u32 attach_btf_id; +}; + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u64 map_extra; +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; + __u64 name; + __u32 name_len; + __u32 kernel_btf; +}; + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long tgt_addr; + struct module *tgt_mod; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +struct btf_kfunc_hook_filter { + btf_kfunc_filter_t filters[16]; + u32 nr_filters; +}; + +struct btf_kfunc_set_tab { + struct btf_id_set8 *sets[13]; + struct btf_kfunc_hook_filter hook_filters[13]; +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +struct nd_opt_hdr { + __u8 
nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_opts_ri; + struct nd_opt_hdr *nd_opts_ri_end; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + union { + __u8 flags; + struct { + __u8 reserved : 6; + __u8 autoconf : 1; + __u8 onlink : 1; + }; + }; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_listen_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + struct kmem_cache *bind2_bucket_cachep; + struct inet_bind_hashbucket *bhash2; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + bool pernet; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + struct hlist_nulls_head nulls_head; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_lost; + u32 prior_delivered; + u32 prior_delivered_ce; + u32 tx_in_flight; + s32 lost; + s32 delivered; + s32 delivered_ce; + long interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long rtt_us; + int losses; + u32 acked_sacked; + u32 prior_in_flight; + u32 last_end_seq; + bool is_app_limited; + bool is_retrans; + bool is_acking_tlp_retrans_seq; + bool is_ack_delayed; + bool is_ece; +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; + struct bpf_prog *skb_verdict; +}; + +struct strp_stats { + unsigned long long msgs; + unsigned long long bytes; + unsigned int mem_fail; + unsigned int need_more_hdr; + unsigned int msg_too_big; + unsigned int msg_timeouts; + unsigned int bad_hdr_len; +}; + +struct strparser; + +struct strp_callbacks { + int (*parse_msg)(struct strparser *, struct sk_buff *); + void (*rcv_msg)(struct strparser *, struct sk_buff *); + int (*read_sock_done)(struct strparser *, int); + void (*abort_parser)(struct strparser *, int); + void (*lock)(struct strparser *); + void (*unlock)(struct strparser *); +}; + +struct strparser { + struct sock *sk; + u32 stopped : 1; + u32 paused : 1; + u32 aborted : 1; + u32 interrupted : 1; + u32 unrecov_intr : 1; + struct sk_buff **skb_nextp; + struct sk_buff *skb_head; + unsigned int need_bytes; + struct delayed_work msg_timer_work; + struct work_struct work; + struct strp_stats stats; + struct strp_callbacks cb; +}; + +struct sk_psock_work_state { + u32 len; + u32 off; +}; + +struct sk_msg; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + bool redir_ingress; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct strparser strp; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + spinlock_t ingress_lock; + unsigned long state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void 
(*saved_unhash)(struct sock *); + void (*saved_destroy)(struct sock *); + void (*saved_close)(struct sock *, long); + void (*saved_write_space)(struct sock *); + void (*saved_data_ready)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + struct proto *sk_proto; + struct mutex work_mutex; + struct sk_psock_work_state work_state; + struct delayed_work work; + struct sock *sk_pair; + struct rcu_work rwork; +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + unsigned long copy[1]; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +struct btf_id_dtor_kfunc { + u32 btf_id; + u32 kfunc_btf_id; +}; + +struct btf_id_dtor_kfunc_tab { + u32 cnt; + struct btf_id_dtor_kfunc dtors[0]; +}; + +struct btf_struct_metas { + u32 cnt; + struct btf_struct_meta types[0]; +}; + +struct bpf_verifier_stack_elem { + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +struct bpf_kfunc_desc { + struct btf_func_model func_model; + u32 func_id; + s32 imm; + u16 offset; + unsigned long addr; +}; + +struct bpf_kfunc_desc_tab { + struct bpf_kfunc_desc descs[256]; + u32 nr_descs; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; +}; + +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[256]; + u32 nr_descs; +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +enum special_kfunc_type { + KF_bpf_obj_new_impl = 0, + KF_bpf_obj_drop_impl = 1, + KF_bpf_refcount_acquire_impl = 2, + KF_bpf_list_push_front_impl = 3, + KF_bpf_list_push_back_impl = 4, + KF_bpf_list_pop_front = 5, + KF_bpf_list_pop_back = 6, + KF_bpf_cast_to_kern_ctx = 7, + KF_bpf_rdonly_cast = 8, + KF_bpf_rcu_read_lock = 9, + KF_bpf_rcu_read_unlock = 10, + KF_bpf_rbtree_remove = 11, + KF_bpf_rbtree_add_impl = 12, + KF_bpf_rbtree_first = 13, + KF_bpf_dynptr_from_skb = 14, + KF_bpf_dynptr_from_xdp = 15, + KF_bpf_dynptr_slice = 16, + KF_bpf_dynptr_slice_rdwr = 17, + KF_bpf_dynptr_clone = 18, + KF_bpf_percpu_obj_new_impl = 19, + KF_bpf_percpu_obj_drop_impl = 20, + KF_bpf_throw = 21, + KF_bpf_iter_css_task_new = 22, +}; + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, + STACK_DYNPTR = 4, + STACK_ITER = 5, +}; + +enum bpf_type_flag { + PTR_MAYBE_NULL = 256, + MEM_RDONLY = 512, + MEM_RINGBUF = 1024, + MEM_USER = 2048, + MEM_PERCPU = 4096, + OBJ_RELEASE = 8192, + PTR_UNTRUSTED = 16384, + MEM_UNINIT = 32768, + DYNPTR_TYPE_LOCAL = 65536, + DYNPTR_TYPE_RINGBUF = 131072, + MEM_FIXED_SIZE = 262144, + MEM_ALLOC = 524288, + PTR_TRUSTED = 1048576, + MEM_RCU = 2097152, + NON_OWN_REF = 4194304, + DYNPTR_TYPE_SKB = 8388608, + DYNPTR_TYPE_XDP = 16777216, + __BPF_TYPE_FLAG_MAX = 16777217, + __BPF_TYPE_LAST_FLAG = 16777216, +}; + +enum bpf_access_src { + ACCESS_DIRECT = 1, + ACCESS_HELPER = 2, +}; + +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, + BPF_CORE_FIELD_BYTE_SIZE = 1, + BPF_CORE_FIELD_EXISTS = 2, + BPF_CORE_FIELD_SIGNED = 3, + BPF_CORE_FIELD_LSHIFT_U64 = 4, + BPF_CORE_FIELD_RSHIFT_U64 = 5, + BPF_CORE_TYPE_ID_LOCAL = 6, + BPF_CORE_TYPE_ID_TARGET = 7, + BPF_CORE_TYPE_EXISTS = 8, + 
BPF_CORE_TYPE_SIZE = 9, + BPF_CORE_ENUMVAL_EXISTS = 10, + BPF_CORE_ENUMVAL_VALUE = 11, + BPF_CORE_TYPE_MATCHES = 12, +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 1, + DST_OP_NO_MARK = 2, +}; + +enum { + REASON_BOUNDS = -1, + REASON_TYPE = -2, + REASON_PATHS = -3, + REASON_LIMIT = -4, + REASON_STACK = -5, +}; + +enum kfunc_ptr_arg_type { + KF_ARG_PTR_TO_CTX = 0, + KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, + KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, + KF_ARG_PTR_TO_DYNPTR = 3, + KF_ARG_PTR_TO_ITER = 4, + KF_ARG_PTR_TO_LIST_HEAD = 5, + KF_ARG_PTR_TO_LIST_NODE = 6, + KF_ARG_PTR_TO_BTF_ID = 7, + KF_ARG_PTR_TO_MEM = 8, + KF_ARG_PTR_TO_MEM_SIZE = 9, + KF_ARG_PTR_TO_CALLBACK = 10, + KF_ARG_PTR_TO_RB_ROOT = 11, + KF_ARG_PTR_TO_RB_NODE = 12, + KF_ARG_PTR_TO_NULL = 13, +}; + +enum { + KF_ARG_DYNPTR_ID = 0, + KF_ARG_LIST_HEAD_ID = 1, + KF_ARG_LIST_NODE_ID = 2, + KF_ARG_RB_ROOT_ID = 3, + KF_ARG_RB_NODE_ID = 4, +}; + +enum { + BTF_TRACING_TYPE_TASK = 0, + BTF_TRACING_TYPE_FILE = 1, + BTF_TRACING_TYPE_VMA = 2, + MAX_BTF_TRACING_TYPE = 3, +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; + +enum { + BPF_MAX_LOOPS = 8388608, +}; + +struct bpf_iter_meta__safe_trusted { + struct seq_file *seq; +}; + +struct bpf_iter__task__safe_trusted { + struct bpf_iter_meta *meta; + struct task_struct *task; +}; + +struct linux_binprm__safe_trusted { + struct file *file; +}; + +struct file__safe_trusted { + struct inode *f_inode; +}; + +struct dentry__safe_trusted { + struct inode *d_inode; +}; + +struct socket__safe_trusted { + struct sock *sk; +}; + +struct task_struct__safe_rcu { + const cpumask_t *cpus_ptr; + struct css_set __attribute__((btf_type_tag("rcu"))) * cgroups; + struct task_struct __attribute__((btf_type_tag("rcu"))) * real_parent; + struct task_struct *group_leader; +}; + +struct cgroup__safe_rcu { + struct kernfs_node *kn; +}; + +struct css_set__safe_rcu { + struct cgroup *dfl_cgrp; +}; + +struct mm_struct__safe_rcu_or_null { + struct file __attribute__((btf_type_tag("rcu"))) * exe_file; +}; + +struct sk_buff__safe_rcu_or_null { + struct sock *sk; +}; + +struct request_sock__safe_rcu_or_null { + struct sock *sk; +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +struct bpf_iter; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char *(*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char *(*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, + struct bpf_insn *, struct bpf_prog *, u32 *); + +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + +struct bpf_core_ctx { + struct bpf_verifier_log *log; + const struct btf *btf; +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *, + const struct bpf_prog *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, + const void *); + int (*reg)(void *); + void (*unreg)(void *); + int (*update)(void *, void *); + int 
(*validate)(void *); + const struct btf_type *type; + const struct btf_type *value_type; + const char *name; + struct btf_func_model func_models[64]; + u32 type_id; + u32 value_id; +}; + +struct bpf_kfunc_call_arg_meta { + struct btf *btf; + u32 func_id; + u32 kfunc_flags; + const struct btf_type *func_proto; + const char *func_name; + u32 ref_obj_id; + u8 release_regno; + bool r0_rdonly; + u32 ret_btf_id; + u64 r0_size; + u32 subprogno; + struct { + u64 value; + bool found; + } arg_constant; + struct btf *arg_btf; + u32 arg_btf_id; + bool arg_owning_ref; + struct { + struct btf_field *field; + } arg_list_head; + struct { + struct btf_field *field; + } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + u32 ref_obj_id; + } initialized_dynptr; + struct { + u8 spi; + u8 frameno; + } iter; + u64 mem_size; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + u8 release_regno; + int regno; + int access_size; + int mem_size; + u64 msize_max_value; + int ref_obj_id; + int dynptr_id; + int map_uid; + int func_id; + struct btf *btf; + u32 btf_id; + struct btf *ret_btf; + u32 ret_btf_id; + u32 subprogno; + struct btf_field *kptr_field; +}; + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; +}; + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, + struct bpf_func_state *, int); + +struct bpf_preload_info; + +struct bpf_preload_ops { + int (*preload)(struct bpf_preload_info *); + struct module *owner; +}; + +struct bpf_preload_info { + char link_name[16]; + struct bpf_link *link; +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +enum { + OPT_MODE = 0, +}; + +struct map_iter { + void *key; + bool done; +}; + +struct bpf_mount_opts { + umode_t mode; +}; + +struct bpf_hrtimer { + struct hrtimer timer; + struct bpf_map *map; + struct bpf_prog *prog; + void __attribute__((btf_type_tag("rcu"))) * callback_fn; + void *value; +}; + +struct bpf_bprintf_buffers { + char bin_args[512]; + char buf[1024]; +}; + +enum { + BPF_F_TIMER_ABS = 1, + BPF_F_TIMER_CPU_PIN = 2, +}; + +typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + +typedef u64 (*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); + +typedef u64 (*btf_bpf_ktime_get_tai_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, 
long *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, unsigned long *); + +typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); + +struct bpf_pidns_info; + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, + const void __attribute__((btf_type_tag("user"))) *); + +typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, + const void __attribute__((btf_type_tag("use" + "r"))) *, + struct task_struct *, u64); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); + +struct bpf_timer_kern; + +typedef u64 (*btf_bpf_timer_init)(struct bpf_timer_kern *, struct bpf_map *, u64); + +struct bpf_timer_kern { + struct bpf_hrtimer *timer; + struct bpf_spin_lock lock; +}; + +typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_timer_kern *, void *, + struct bpf_prog_aux *); + +typedef u64 (*btf_bpf_timer_start)(struct bpf_timer_kern *, u64, u64); + +typedef u64 (*btf_bpf_timer_cancel)(struct bpf_timer_kern *); + +typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); + +typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); + +struct bpf_refcount { + int : 32; +}; + +struct bpf_rb_node_kern { + struct rb_node rb_node; + void *owner; +}; + +struct bpf_rb_node { + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_timer { + long : 64; + long : 64; +}; + +struct bpf_dynptr { + long : 64; + long : 64; +}; + +struct bpf_list_node_kern { + struct list_head list_head; + void *owner; +}; + +struct bpf_list_node { + long : 64; + long : 64; + long : 64; +}; + +typedef __kernel_ulong_t ino_t; + +struct bpf_list_head { + long : 64; + long : 64; +}; + +struct bpf_rb_root { + long : 64; + long : 64; +}; + +struct bpf_throw_ctx { + struct bpf_prog_aux *aux; + u64 sp; + u64 bp; + int cnt; +}; + +enum bpf_iter_feature { + BPF_ITER_RESCHED = 1, +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + u64 session_id; + u64 seq_num; + bool done_stop; + long : 0; + u8 target_private[0]; +}; + +typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); + +struct bpf_iter_num { + __u64 __opaque[1]; +}; + +struct bpf_iter_num_kern { + int cur; + int end; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct mmap_unlock_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +enum { + BPF_TASK_ITER_ALL_PROCS = 0, + BPF_TASK_ITER_ALL_THREADS = 1, + BPF_TASK_ITER_PROC_THREADS = 2, +}; + +enum 
bpf_task_vma_iter_find_op { + task_vma_iter_first_vma = 0, + task_vma_iter_next_vma = 1, + task_vma_iter_find_vma = 2, +}; + +typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; + enum bpf_iter_task_type type; + u32 pid; + u32 pid_visiting; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_vma { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + union { + struct vm_area_struct *vma; + }; +}; + +struct bpf_iter_seq_task_vma_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + u32 tid; + unsigned long prev_vm_start; + unsigned long prev_vm_end; +}; + +struct bpf_iter_task_vma { + __u64 __opaque[1]; +}; + +struct bpf_iter_task_vma_kern_data; + +struct bpf_iter_task_vma_kern { + struct bpf_iter_task_vma_kern_data *data; +}; + +struct bpf_iter_task_vma_kern_data { + struct task_struct *task; + struct mm_struct *mm; + struct mmap_unlock_irq_work *work; + struct vma_iterator vmi; +}; + +struct bpf_iter_css_task { + __u64 __opaque[1]; +}; + +struct bpf_iter_css_task_kern { + struct css_task_iter *css_it; +}; + +struct bpf_iter_task { + __u64 __opaque[3]; +}; + +struct bpf_iter_task_kern { + struct task_struct *task; + struct task_struct *pos; + unsigned int flags; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_link { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_link *link; + }; +}; + +struct bpf_iter_seq_link_info { + u32 link_id; +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head __attribute__((btf_type_tag("percpu"))) * freelist; + struct pcpu_freelist_head extralist; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + raw_spinlock_t lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_lru_locallist; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist __attribute__((btf_type_tag("percpu"))) * local_list; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_lru_node; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list __attribute__((btf_type_tag("percpu"))) * percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bucket; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bpf_mem_alloc ma; + 
struct bpf_mem_alloc pcpu_ma; + struct bucket *buckets; + void *elems; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem *__attribute__((btf_type_tag("percpu"))) * extra_elems; + struct percpu_counter pcount; + atomic_t count; + bool use_percpu_counter; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + struct lock_class_key lockdep_key; + int __attribute__((btf_type_tag("percpu"))) * map_locked[8]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bucket { + struct hlist_nulls_head head; + raw_spinlock_t raw_lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + void *ptr_to_pptr; + struct bpf_lru_node lru_node; + }; + u32 hash; + long : 0; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct bpf_iter__bpf_map_elem { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct lpm_trie_node; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node __attribute__((btf_type_tag("rcu"))) * root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + spinlock_t lock; + long : 64; + long : 64; + long : 64; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node __attribute__((btf_type_tag("rcu"))) * child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct bpf_lpm_trie_key { + __u32 prefixlen; + __u8 data[0]; +}; + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + u32 nr_hash_funcs; + unsigned long bitset[0]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); 
+ +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, + struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + u64 mask; + struct page **pages; + int nr_pages; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + spinlock_t spinlock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + atomic_t busy; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + 
long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long 
: 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned long consumer_pos; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 
64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned long producer_pos; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 
64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + 
long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_ringbuf *rb; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) * local_storage; + struct callback_head rcu; + long : 64; + struct bpf_local_storage_data sdata; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + u64 idx_usage_counts[16]; +}; + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + 
BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +typedef u64 (*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, + void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, + u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); + +typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); + +struct bpf_tuple { + struct bpf_prog *prog; + struct bpf_link *link; +}; + +enum { + BPF_MAX_TRAMP_LINKS = 38, +}; + +struct bpf_flow_keys; + +struct bpf_sock; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; + __u8 tstamp_type; + __u64 hwtstamp; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __be16 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; + u32 uaddrlen; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; + __u64 skb_hwtstamp; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + u64 temp; +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + const void *data; + const void 
*data_end; +}; + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + u64 tmp_reg; +}; + +struct bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + struct task_struct *current_task; + u64 tmp_reg; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; + union { + struct bpf_sock *sk; + }; + union { + struct bpf_sock *migrating_sk; + }; +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + struct sock *migrating_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +struct bpf_sk_lookup { + union { + union { + struct bpf_sock *sk; + }; + __u64 cookie; + }; + __u32 family; + __u32 protocol; + __u32 remote_ip4; + __u32 remote_ip6[4]; + __be16 remote_port; + __u32 local_ip4; + __u32 local_ip6[4]; + __u32 local_port; + __u32 ingress_ifindex; +}; + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + u32 ingress_ifindex; + bool no_reuseport; +}; + +struct bpf_nf_ctx { + const struct nf_hook_state *state; + struct sk_buff *skb; +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t 
BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void *BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_SYSCALL_prog; + void *BPF_PROG_TYPE_SYSCALL_kern; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct nf_hook_state { + u8 hook; + u8 pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +struct btf_verifier_env; + +struct resolve_vertex; + +struct btf_show; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, + const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, + const struct btf_member *, const struct btf_type *); + void (*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, + struct btf_show *); +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_verifier_env { + struct btf *btf; + u8 *visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, struct __va_list_tag *); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member : 1; + u8 array_terminated : 1; + u16 array_encoding; + u32 type_id; + int status; + 
const struct btf_type *type; + const struct btf_member *member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct bpf_cand_cache { + const char *name; + u32 name_len; + u16 kind; + u16 cnt; + struct { + const struct btf *btf; + u32 id; + } cands[0]; +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +enum { + BTF_MODULE_F_LIVE = 1, +}; + +enum btf_kfunc_hook { + BTF_KFUNC_HOOK_COMMON = 0, + BTF_KFUNC_HOOK_XDP = 1, + BTF_KFUNC_HOOK_TC = 2, + BTF_KFUNC_HOOK_STRUCT_OPS = 3, + BTF_KFUNC_HOOK_TRACING = 4, + BTF_KFUNC_HOOK_SYSCALL = 5, + BTF_KFUNC_HOOK_FMODRET = 6, + BTF_KFUNC_HOOK_CGROUP_SKB = 7, + BTF_KFUNC_HOOK_SCHED_ACT = 8, + BTF_KFUNC_HOOK_SK_SKB = 9, + BTF_KFUNC_HOOK_SOCKET_FILTER = 10, + BTF_KFUNC_HOOK_LWT = 11, + BTF_KFUNC_HOOK_NETFILTER = 12, + BTF_KFUNC_HOOK_MAX = 13, +}; + +enum { + BTF_KFUNC_SET_MAX_CNT = 256, + BTF_DTOR_KFUNC_MAX_CNT = 256, + BTF_KFUNC_FILTER_MAX_CNT = 16, +}; + +enum { + BTF_FIELD_IGNORE = 0, + BTF_FIELD_FOUND = 1, +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +struct btf_module { + struct list_head list; + struct module *module; + struct btf *btf; + struct bin_attribute *sysfs_attr; + int flags; +}; + +typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); + +struct btf_decl_tag { + __s32 component_idx; +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +struct btf_var { + __u32 linkage; +}; + +struct btf_enum64 { + __u32 name_off; + __u32 val_lo32; + __u32 val_hi32; +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +struct btf_field_info { + enum btf_field_type type; + u32 off; + union { + struct { + u32 type_id; + } kptr; + struct { + const char *node_name; + u32 value_btf_id; + } graph_root; + }; +}; + +struct bpf_core_cand; + +struct bpf_core_cand_list { + struct bpf_core_cand *cands; + int len; +}; + +struct bpf_core_cand { + const struct btf *btf; + __u32 id; +}; + +struct bpf_core_accessor { + __u32 type_id; + __u32 idx; + const char *name; +}; + +struct bpf_core_spec { + const struct btf *btf; + struct bpf_core_accessor spec[64]; + __u32 root_type_id; + enum bpf_core_relo_kind relo_kind; + int len; + int raw_spec[64]; + int raw_len; + __u32 bit_offset; +}; + +struct bpf_core_relo_res { + __u64 orig_val; + __u64 new_val; + bool poison; + bool validate; + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + void *rw_image; + u32 image_off; + struct bpf_ksym ksym; + struct static_call_key *sc_key; + void *sc_tramp; +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, + IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, + IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1ULL, + IFF_EBRIDGE = 2ULL, + IFF_BONDING = 
4ULL, + IFF_ISATAP = 8ULL, + IFF_WAN_HDLC = 16ULL, + IFF_XMIT_DST_RELEASE = 32ULL, + IFF_DONT_BRIDGE = 64ULL, + IFF_DISABLE_NETPOLL = 128ULL, + IFF_MACVLAN_PORT = 256ULL, + IFF_BRIDGE_PORT = 512ULL, + IFF_OVS_DATAPATH = 1024ULL, + IFF_TX_SKB_SHARING = 2048ULL, + IFF_UNICAST_FLT = 4096ULL, + IFF_TEAM_PORT = 8192ULL, + IFF_SUPP_NOFCS = 16384ULL, + IFF_LIVE_ADDR_CHANGE = 32768ULL, + IFF_MACVLAN = 65536ULL, + IFF_XMIT_DST_RELEASE_PERM = 131072ULL, + IFF_L3MDEV_MASTER = 262144ULL, + IFF_NO_QUEUE = 524288ULL, + IFF_OPENVSWITCH = 1048576ULL, + IFF_L3MDEV_SLAVE = 2097152ULL, + IFF_TEAM = 4194304ULL, + IFF_RXFH_CONFIGURED = 8388608ULL, + IFF_PHONY_HEADROOM = 16777216ULL, + IFF_MACSEC = 33554432ULL, + IFF_NO_RX_HANDLER = 67108864ULL, + IFF_FAILOVER = 134217728ULL, + IFF_FAILOVER_SLAVE = 268435456ULL, + IFF_L3MDEV_RX_HANDLER = 536870912ULL, + IFF_NO_ADDRCONF = 1073741824ULL, + IFF_TX_SKB_NO_LINEAR = 2147483648ULL, + IFF_CHANGE_PROTO_DOWN = 4294967296ULL, + IFF_SEE_ALL_HWTSTAMP_REQUESTS = 8589934592ULL, +}; + +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + NETDEV_XDP_ACT_MASK = 127, +}; + +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = 1, + XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, +}; + +enum { + BPF_F_BROADCAST = 8, + BPF_F_EXCLUDE_INGRESS = 16, +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_PRE_UNINIT = 18, + NETDEV_RELEASE = 19, + NETDEV_NOTIFY_PEERS = 20, + NETDEV_JOIN = 21, + NETDEV_CHANGEUPPER = 22, + NETDEV_RESEND_IGMP = 23, + NETDEV_PRECHANGEMTU = 24, + NETDEV_CHANGEINFODATA = 25, + NETDEV_BONDING_INFO = 26, + NETDEV_PRECHANGEUPPER = 27, + NETDEV_CHANGELOWERSTATE = 28, + NETDEV_UDP_TUNNEL_PUSH_INFO = 29, + NETDEV_UDP_TUNNEL_DROP_INFO = 30, + NETDEV_CHANGE_TX_QUEUE_LEN = 31, + NETDEV_CVLAN_FILTER_PUSH_INFO = 32, + NETDEV_CVLAN_FILTER_DROP_INFO = 33, + NETDEV_SVLAN_FILTER_PUSH_INFO = 34, + NETDEV_SVLAN_FILTER_DROP_INFO = 35, + NETDEV_OFFLOAD_XSTATS_ENABLE = 36, + NETDEV_OFFLOAD_XSTATS_DISABLE = 37, + NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, + NETDEV_XDP_FEAT_CHANGE = 40, +}; + +struct bpf_dtab_netdev; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev __attribute__((btf_type_tag("rcu"))) * *netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; + long : 64; + long : 64; +}; + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +typedef struct bio_vec skb_frag_t; + +struct skb_shared_info { + __u8 flags; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + unsigned short gso_size; + unsigned short gso_segs; + struct sk_buff *frag_list; + struct skb_shared_hwtstamps hwtstamps; + unsigned int gso_type; + u32 tskey; + atomic_t 
dataref; + unsigned int xdp_frags_size; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u64 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 flags; + u32 kern_flags; + u32 map_id; + enum bpf_map_type map_type; + struct bpf_nh_params nh; +}; + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + unsigned int count; +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct ptr_ring; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue __attribute__((btf_type_tag("percpu"))) * bulkq; + struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + struct completion kthread_running; + struct rcu_work free_work; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + int size; + int batch; + void **queue; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry __attribute__((btf_type_tag("rcu"))) * *cpu_map; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_prog_offload_ops; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +enum xdp_rx_metadata { + XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, + XDP_METADATA_KFUNC_RX_HASH = 1, + MAX_XDP_METADATA_KFUNC = 2, +}; + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +typedef struct ns_common *ns_get_path_helper_t(void *); + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; +}; + +struct mini_Qdisc; + +struct tcx_entry { + struct mini_Qdisc __attribute__((btf_type_tag("rcu"))) * miniq; + struct bpf_mprog_bundle bundle; + bool miniq_active; + struct callback_head rcu; +}; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) * cpu_bstats; + struct 
gnet_stats_queue __attribute__((btf_type_tag("percpu"))) * cpu_qstats; + unsigned long rcu_state; +}; + +struct tcx_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = 18446744073709551584ULL, + PERF_CONTEXT_KERNEL = 18446744073709551488ULL, + PERF_CONTEXT_USER = 18446744073709551104ULL, + PERF_CONTEXT_GUEST = 18446744073709549568ULL, + PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, + PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, + PERF_CONTEXT_MAX = 18446744073709547521ULL, +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct stack_map_bucket; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; + long : 64; + long : 64; + long : 64; +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + u64 data[0]; +}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +struct cgroup_iter_priv { + struct cgroup_subsys_state *start_css; + bool visited_all; + bool terminate; + int order; +}; + +struct bpf_iter__cgroup { + union { + struct bpf_iter_meta *meta; + }; + union { + struct cgroup *cgroup; + }; +}; + +struct bpf_iter_css { + __u64 __opaque[3]; +}; + +struct bpf_iter_css_kern { + struct cgroup_subsys_state *start; + struct cgroup_subsys_state *pos; + unsigned int flags; +}; + +typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct cgroup *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, +}; + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_retval)(); + +typedef u64 (*btf_bpf_set_retval)(int); + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); + +struct bpf_cgroup_link; + +struct bpf_prog_list { + struct hlist_node node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum 
bpf_attach_type type; +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; + int retval; +}; + +struct bpf_sockopt_buf { + u8 data[32]; +}; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, + IPPROTO_PUP = 12, + IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_L2TP = 115, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, + SOCK_RCVMARK = 27, +}; + +struct reuseport_array { + struct bpf_map map; + struct sock __attribute__((btf_type_tag("rcu"))) * ptrs[0]; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, + BPF_STRUCT_OPS_STATE_READY = 3, +}; + +struct bpf_dummy_ops_state; + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *); + int (*test_2)(struct bpf_dummy_ops_state *, int, unsigned short, char, unsigned long); + int (*test_sleepable)(struct bpf_dummy_ops_state *); +}; + +struct bpf_struct_ops_bpf_dummy_ops { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct bpf_dummy_ops data; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct bpf_dummy_ops_state { + int val; +}; + +struct bpf_struct_ops_tcp_congestion_ops { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct tcp_congestion_ops data; +}; + +struct bpf_struct_ops_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + struct callback_head rcu; + const struct bpf_struct_ops *st_ops; + struct mutex lock; + struct bpf_link **links; + void *image; + struct bpf_struct_ops_value *uvalue; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_link { + struct 
bpf_link link; + struct bpf_map __attribute__((btf_type_tag("rcu"))) * map; +}; + +struct bpf_cpumask { + cpumask_t cpumask; + refcount_t usage; +}; + +struct static_call_tramp_key { + s32 tramp; + s32 key; +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int online; + struct perf_cgroup *cgrp; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; +}; + +struct perf_guest_info_callbacks { + unsigned int (*state)(); + unsigned long (*get_ip)(); + unsigned int (*handle_intel_pt_intr)(); +}; + +struct swevent_hlist; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; + int recursion[4]; +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct min_heap_callbacks { + int elem_size; + bool (*less)(const void *, const void *); + void (*swp)(void *, void *); +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_CPU = 8, + EVENT_CGROUP = 16, + EVENT_ALL = 3, +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_LOST = 16, + PERF_FORMAT_MAX = 32, +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +enum { + IF_ACT_NONE = -1, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + unsigned long offset; + unsigned long size; + enum perf_addr_filter_action_t action; +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, + struct perf_event_context *, void *); + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +typedef void perf_iterate_f(struct perf_event *, void *); + +struct 
stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + u8 build_id[20]; + u32 build_id_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_aux_event { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u64 hw_id; +}; + +struct __group_key { + int cpu; + struct pmu *pmu; + struct cgroup *cgroup; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct min_heap { + void *data; + int nr; + int size; +}; + +struct perf_aux_event___3 { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_namespaces_event { + struct task_struct *task; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; +}; + +struct event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +struct bp_slots_histogram { + atomic_t count[4]; +}; + +struct rhltable { + struct rhashtable ht; +}; + +struct bp_cpuinfo { + unsigned int cpu_pinned; + struct bp_slots_histogram tsk_pinned; +}; + +enum bp_type_idx { + TYPE_INST = 0, + TYPE_DATA = 0, + TYPE_MAX = 1, +}; + +typedef u8 uprobe_opcode_t; + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct page_vma_mapped_walk { + unsigned long pfn; + unsigned long nr_pages; + unsigned long pgoff; + struct vm_area_struct *vma; + unsigned long address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +typedef unsigned int 
fgf_t; + +struct __uprobe_key { + struct inode *inode; + loff_t offset; +}; + +struct map_info { + struct map_info *next; + struct mm_struct *mm; + unsigned long vaddr; +}; + +typedef int filler_t(struct file *, struct folio *); + +struct user_return_notifier { + void (*on_user_return)(struct user_return_notifier *); + struct hlist_node link; +}; + +struct padata_work { + struct work_struct pw_work; + struct list_head pw_list; + void *pw_data; +}; + +struct padata_instance; + +struct padata_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct padata_instance *, struct attribute *, char *); + ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); +}; + +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; + +struct padata_instance { + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; + struct workqueue_struct *parallel_wq; + struct workqueue_struct *serial_wq; + struct list_head pslist; + struct padata_cpumask cpumask; + struct kobject kobj; + struct mutex lock; + u8 flags; +}; + +struct padata_shell; + +struct padata_list; + +struct padata_serial_queue; + +struct parallel_data { + struct padata_shell *ps; + struct padata_list __attribute__((btf_type_tag("percpu"))) * reorder_list; + struct padata_serial_queue __attribute__((btf_type_tag("percpu"))) * squeue; + refcount_t refcnt; + unsigned int seq_nr; + unsigned int processed; + int cpu; + struct padata_cpumask cpumask; + struct work_struct reorder_work; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + spinlock_t lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct padata_shell { + struct padata_instance *pinst; + struct parallel_data __attribute__((btf_type_tag("rcu"))) * pd; + struct parallel_data *opd; + struct list_head list; +}; + +struct padata_list { + struct list_head list; + spinlock_t lock; +}; + +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +struct padata_priv { + struct list_head list; + struct parallel_data *pd; + int cb_cpu; + unsigned int seq_nr; + int info; + void (*parallel)(struct padata_priv *); + void (*serial)(struct padata_priv *); +}; + +struct padata_mt_job; + +struct padata_mt_job_state { + spinlock_t lock; + struct completion completion; + struct padata_mt_job *job; + int nworks; + int nworks_fini; + unsigned long chunk_size; +}; + +struct padata_mt_job { + void (*thread_fn)(unsigned long, unsigned long, void *); + void *fn_arg; + unsigned long start; + unsigned long size; + unsigned long align; + unsigned long min_chunk; + int max_threads; +}; + +struct static_key_deferred { + struct static_key key; + unsigned long timeout; + struct delayed_work work; +}; + +typedef void (*btf_trace_user_enter)(void *, int); + +typedef void (*btf_trace_user_exit)(void *, int); + +struct trace_event_raw_context_tracking_user { + struct trace_entry ent; + int dummy; + char __data[0]; +}; + +struct trace_event_data_offsets_context_tracking_user {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, unsigned long, unsigned long, + unsigned long, unsigned long); + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + 
RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + s32 node_id; + s32 mm_cid; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + unsigned long regs_ip; + unsigned long start_ip; + unsigned long post_commit_offset; + unsigned long abort_ip; + char __data[0]; +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +struct watch_filter; + +struct watch_queue { + struct callback_head rcu; + struct watch_filter __attribute__((btf_type_tag("rcu"))) * filter; + struct pipe_inode_info *pipe; + struct hlist_head watches; + struct page **notes; + unsigned long *notes_bitmap; + struct kref usage; + spinlock_t lock; + unsigned int nr_notes; + unsigned int nr_pages; +}; + +enum watch_notification_type { + WATCH_TYPE_META = 0, + WATCH_TYPE_KEY_NOTIFY = 1, + WATCH_TYPE__NR = 2, +}; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; + __u32 info_filter; + __u32 info_mask; +}; + +struct watch_filter { + union { + struct callback_head rcu; + unsigned long type_filter[1]; + }; + u32 nr_filters; + struct watch_type_filter filters[0]; +}; + +enum watch_meta_notification_subtype { + WATCH_META_REMOVAL_NOTIFICATION = 0, + WATCH_META_LOSS_NOTIFICATION = 1, +}; + +struct watch_list; + +struct watch { + union { + struct callback_head rcu; + u32 info_id; + }; + struct watch_queue __attribute__((btf_type_tag("rcu"))) * queue; + struct hlist_node queue_node; + struct watch_list __attribute__((btf_type_tag("rcu"))) * watch_list; + struct hlist_node list_node; + const struct cred *cred; + void *private; + u64 id; + struct kref usage; +}; + +struct watch_list { + struct callback_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +struct watch_notification { + __u32 type : 24; + __u32 subtype : 8; + __u32 info; +}; + +struct watch_notification_type_filter { + __u32 type; + __u32 info_filter; + __u32 info_mask; + __u32 subtype_filter[8]; +}; + +struct watch_notification_filter { + __u32 nr_filters; + __u32 __reserved; + struct watch_notification_type_filter filters[0]; +}; + +struct watch_notification_removal { + struct watch_notification watch; + __u64 id; +}; + +enum OID { + OID_id_dsa = 0, + OID_id_ecPublicKey = 1, + OID_id_prime192v1 = 2, + OID_id_prime256v1 = 3, + OID_id_ecdsa_with_sha224 = 4, + OID_id_ecdsa_with_sha256 = 5, + OID_id_ecdsa_with_sha384 = 6, + OID_id_ecdsa_with_sha512 = 7, + OID_rsaEncryption = 8, + OID_sha256WithRSAEncryption = 9, + OID_sha384WithRSAEncryption = 10, + OID_sha512WithRSAEncryption = 11, + OID_sha224WithRSAEncryption = 12, + OID_data = 13, + OID_signed_data = 14, + OID_email_address = 15, + OID_contentType = 16, + OID_messageDigest = 17, + OID_signingTime = 18, + OID_smimeCapabilites = 19, + OID_smimeAuthenticatedAttrs = 20, + OID_mskrb5 = 21, + OID_krb5 = 22, + OID_krb5u2u = 23, + OID_msIndirectData = 24, + OID_msStatementType = 25, + OID_msSpOpusInfo = 26, + OID_msPeImageDataObjId = 27, + OID_msIndividualSPKeyPurpose = 28, + OID_msOutlookExpress = 29, + OID_ntlmssp = 30, + OID_negoex = 31, + OID_spnego = 32, + OID_IAKerb = 33, + OID_PKU2U = 34, + OID_Scram = 35, + OID_certAuthInfoAccess = 36, + OID_id_ansip384r1 = 37, + OID_sha256 = 38, + OID_sha384 = 39, + OID_sha512 = 40, + OID_sha224 = 41, + 
OID_commonName = 42, + OID_surname = 43, + OID_countryName = 44, + OID_locality = 45, + OID_stateOrProvinceName = 46, + OID_organizationName = 47, + OID_organizationUnitName = 48, + OID_title = 49, + OID_description = 50, + OID_name = 51, + OID_givenName = 52, + OID_initials = 53, + OID_generationalQualifier = 54, + OID_subjectKeyIdentifier = 55, + OID_keyUsage = 56, + OID_subjectAltName = 57, + OID_issuerAltName = 58, + OID_basicConstraints = 59, + OID_crlDistributionPoints = 60, + OID_certPolicies = 61, + OID_authorityKeyIdentifier = 62, + OID_extKeyUsage = 63, + OID_NetlogonMechanism = 64, + OID_appleLocalKdcSupported = 65, + OID_gostCPSignA = 66, + OID_gostCPSignB = 67, + OID_gostCPSignC = 68, + OID_gost2012PKey256 = 69, + OID_gost2012PKey512 = 70, + OID_gost2012Digest256 = 71, + OID_gost2012Digest512 = 72, + OID_gost2012Signature256 = 73, + OID_gost2012Signature512 = 74, + OID_gostTC26Sign256A = 75, + OID_gostTC26Sign256B = 76, + OID_gostTC26Sign256C = 77, + OID_gostTC26Sign256D = 78, + OID_gostTC26Sign512A = 79, + OID_gostTC26Sign512B = 80, + OID_gostTC26Sign512C = 81, + OID_sm2 = 82, + OID_sm3 = 83, + OID_SM2_with_SM3 = 84, + OID_sm3WithRSAEncryption = 85, + OID_TPMLoadableKey = 86, + OID_TPMImportableKey = 87, + OID_TPMSealedData = 88, + OID_sha3_256 = 89, + OID_sha3_384 = 90, + OID_sha3_512 = 91, + OID_id_ecdsa_with_sha3_256 = 92, + OID_id_ecdsa_with_sha3_384 = 93, + OID_id_ecdsa_with_sha3_512 = 94, + OID_id_rsassa_pkcs1_v1_5_with_sha3_256 = 95, + OID_id_rsassa_pkcs1_v1_5_with_sha3_384 = 96, + OID_id_rsassa_pkcs1_v1_5_with_sha3_512 = 97, + OID__NR = 98, +}; + +struct x509_certificate; + +struct pkcs7_signed_info; + +struct pkcs7_message { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_LARGE_FOLIO_SUPPORT = 6, + AS_RELEASE_ALWAYS = 7, + AS_STABLE_WRITES = 8, +}; + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +struct xa_node { + unsigned char shift; + unsigned char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node __attribute__((btf_type_tag("rcu"))) * parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void __attribute__((btf_type_tag("rcu"))) * slots[64]; + union { + unsigned long tags[3]; + unsigned long marks[3]; + }; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + unsigned long pfn; + unsigned long i_ino; + unsigned long index; + dev_t s_dev; + unsigned char order; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + unsigned long i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + unsigned 
long i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +struct xa_state { + struct xarray *xa; + unsigned long xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; + struct list_lru *xa_lru; +}; + +struct folio_batch { + unsigned char nr; + bool percpu_pvec_drained; + struct folio *folios[15]; +}; + +struct wait_page_key { + struct folio *folio; + int bit_nr; + int page_match; +}; + +typedef struct pglist_data pg_data_t; + +struct trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + +typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, unsigned long, + unsigned long, unsigned long, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, int); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, + enum compact_result, int, int, bool); + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_OOM_GROUP_KILL = 5, + MEMCG_SWAP_HIGH = 6, + MEMCG_SWAP_MAX = 7, + MEMCG_SWAP_FAIL = 8, + MEMCG_NR_MEMORY_EVENTS = 9, +}; + +struct trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + unsigned long reclaimable; + unsigned long available; + unsigned long min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + struct trace_entry ent; 
+ int order; + int priority; + int result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + unsigned long totalpages; + struct task_struct *chosen; + long chosen_points; + enum oom_constraint constraint; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim {}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +enum { + XA_CHECK_SCHED = 4096, +}; + +enum vmscan_throttle_state { + VMSCAN_THROTTLE_WRITEBACK = 0, + VMSCAN_THROTTLE_ISOLATED = 1, + VMSCAN_THROTTLE_NOPROGRESS = 2, + VMSCAN_THROTTLE_CONGESTED = 3, + NR_VMSCAN_THROTTLE = 4, +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +enum page_memcg_data_flags { + MEMCG_DATA_OBJCGS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + unsigned long avail; + unsigned long dirty; + unsigned long thresh; + unsigned long bg_thresh; + unsigned long wb_dirty; + unsigned long wb_thresh; + unsigned long wb_bg_thresh; + unsigned long pos_ratio; +}; + +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + +typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct folio *); + +struct lru_rotate { + local_lock_t lock; + struct folio_batch fbatch; +}; + +struct cpu_fbatches { + local_lock_t lock; + struct folio_batch lru_add; + struct folio_batch lru_deactivate_file; + struct folio_batch lru_deactivate; + struct folio_batch lru_lazyfree; + struct folio_batch activate; +}; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +enum { + LRU_GEN_ANON = 0, + LRU_GEN_FILE = 1, +}; + +enum { + LRU_GEN_CORE = 0, + LRU_GEN_MM_WALK = 1, + LRU_GEN_NONLEAF_YOUNG = 2, + NR_LRU_GEN_CAPS = 3, +}; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct folio *folio; + unsigned long pfn; + enum lru_list lru; + unsigned long flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct folio *folio; + unsigned long pfn; + char __data[0]; +}; + +typedef void (*move_fn_t)(struct lruvec *, struct folio *); + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef union { + struct page **pages; + struct folio **folios; + struct encoded_page **encoded_pages; +} release_pages_arg; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, 
gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, unsigned long); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, unsigned long); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, unsigned long); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, + struct shrink_control *, long, unsigned long, + unsigned long long, unsigned long, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long, + long, long); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, unsigned long, unsigned long, + unsigned long, unsigned long, int); + +typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); + +struct reclaim_stat; + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, unsigned long, + unsigned long, + struct reclaim_stat *, int, int); + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int nr_lazyfree_fail; +}; + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, unsigned long, + unsigned long, unsigned long, + unsigned long, int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, unsigned long); + +typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); + +enum page_walk_lock { + PGWALK_RDLOCK = 0, + PGWALK_WRLOCK = 1, + PGWALK_WRLOCK_VERIFY = 2, +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); + int (*p4d_entry)(p4d_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pte_hole)(unsigned long, unsigned long, int, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, unsigned long, + struct mm_walk *); + int (*test_walk)(unsigned long, unsigned long, struct mm_walk *); + int (*pre_vma)(unsigned long, unsigned long, struct mm_walk *); + void (*post_vma)(struct mm_walk *); + enum page_walk_lock walk_lock; +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +enum { + MEMCG_LRU_NOP = 0, + MEMCG_LRU_HEAD = 1, + MEMCG_LRU_TAIL = 2, + MEMCG_LRU_OLD = 3, + MEMCG_LRU_YOUNG = 4, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, +}; + +enum folio_references { + FOLIOREF_RECLAIM = 0, + FOLIOREF_RECLAIM_CLEAN = 1, + FOLIOREF_KEEP = 2, + FOLIOREF_ACTIVATE = 3, +}; + +enum ttu_flags { + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + TTU_SYNC = 16, + TTU_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + 
TTU_RMAP_LOCKED = 128, +}; + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + SWP_SYNCHRONOUS_IO = 4096, + SWP_SCANNING = 16384, +}; + +enum migrate_reason { + MR_COMPACTION = 0, + MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_LONGTERM_PIN = 7, + MR_DEMOTION = 8, + MR_TYPES = 9, +}; + +enum { + MM_LEAF_TOTAL = 0, + MM_LEAF_OLD = 1, + MM_LEAF_YOUNG = 2, + MM_NONLEAF_TOTAL = 3, + MM_NONLEAF_FOUND = 4, + MM_NONLEAF_ADDED = 5, + NR_MM_STATS = 6, +}; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + WMARK_PROMO = 3, + NR_WMARK = 4, +}; + +enum lruvec_flags { + LRUVEC_CGROUP_CONGESTED = 0, + LRUVEC_NODE_CONGESTED = 1, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, + ZONE_RECLAIM_ACTIVE = 1, + ZONE_BELOW_HIGH = 2, +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + unsigned long gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + unsigned long gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + unsigned long nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long nr_objects_to_shrink; + unsigned long gfp_flags; + unsigned long cache_items; + unsigned long long delta; + unsigned long total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long unused_scan; + long new_scan; + int retval; + long total_scan; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + unsigned long nr_requested; + unsigned long nr_scanned; + unsigned long nr_skipped; + unsigned long nr_taken; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_write_folio { + struct trace_entry ent; + unsigned long pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + unsigned long nr_scanned; + unsigned long nr_reclaimed; + unsigned long nr_dirty; + unsigned long nr_writeback; + unsigned long nr_congested; + unsigned long nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + unsigned long nr_ref_keep; + unsigned long nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + unsigned long nr_taken; + unsigned long nr_active; + unsigned long nr_deactivated; + unsigned long nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct 
trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + unsigned long gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_throttled { + struct trace_entry ent; + int nid; + int usec_timeout; + int usec_delayed; + int reason; + char __data[0]; +}; + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; +}; + +struct scan_control { + unsigned long nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + unsigned long anon_cost; + unsigned long file_cost; + unsigned int may_deactivate : 2; + unsigned int force_deactivate : 1; + unsigned int skipped_deactivate : 1; + unsigned int may_writepage : 1; + unsigned int may_unmap : 1; + unsigned int may_swap : 1; + unsigned int proactive : 1; + unsigned int memcg_low_reclaim : 1; + unsigned int memcg_low_skipped : 1; + unsigned int hibernation_mode : 1; + unsigned int compaction_ready : 1; + unsigned int cache_trim_mode : 1; + unsigned int file_is_tiny : 1; + unsigned int no_demotion : 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + unsigned long nr_scanned; + unsigned long nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + unsigned int generation; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + PAGE_CLEAN = 3, +} pageout_t; + +typedef struct folio *new_folio_t(struct folio *, unsigned long); + +typedef void free_folio_t(struct folio *, unsigned long); + +struct ctrl_pos { + unsigned long refaulted; + unsigned long total; + int gain; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct trace_event_data_offsets_mm_vmscan_write_folio {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +struct trace_event_data_offsets_mm_vmscan_throttled {}; + +struct memcg_vmstats { + long state[48]; + unsigned long events[21]; + long state_local[48]; + unsigned long events_local[21]; + long state_pending[48]; + unsigned long events_pending[21]; +}; + +struct fileattr { + u32 flags; + u32 fsx_xflags; + u32 fsx_extsize; + u32 fsx_nextents; + u32 fsx_projid; + u32 fsx_cowextsize; + bool flags_valid : 1; + bool fsx_valid : 1; +}; + +struct constant_table { + const char *name; + int value; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_NOALLOC = 1, + SGP_CACHE = 2, + SGP_WRITE = 3, + SGP_FALLOC = 4, +}; + +enum mfill_atomic_mode { + MFILL_ATOMIC_COPY = 0, + MFILL_ATOMIC_ZEROPAGE = 1, + MFILL_ATOMIC_CONTINUE = 2, + MFILL_ATOMIC_POISON = 3, + NR_MFILL_ATOMIC_MODES = 4, +}; + +enum shmem_param { + Opt_gid = 0, + Opt_huge = 1, + Opt_mode = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid = 
7, + Opt_inode32 = 8, + Opt_inode64 = 9, + Opt_noswap = 10, + Opt_quota = 11, + Opt_usrquota = 12, + Opt_grpquota = 13, + Opt_usrquota_block_hardlimit = 14, + Opt_usrquota_inode_hardlimit = 15, + Opt_grpquota_block_hardlimit = 16, + Opt_grpquota_inode_hardlimit = 17, +}; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_INO64_GEN = 129, + FILEID_INO64_GEN_PARENT = 130, + FILEID_LUSTRE = 151, + FILEID_BCACHEFS_WITHOUT_PARENT = 177, + FILEID_BCACHEFS_WITH_PARENT = 178, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +enum { + MPOL_DEFAULT = 0, + MPOL_PREFERRED = 1, + MPOL_BIND = 2, + MPOL_INTERLEAVE = 3, + MPOL_LOCAL = 4, + MPOL_PREFERRED_MANY = 5, + MPOL_MAX = 6, +}; + +enum iter_type { + ITER_UBUF = 0, + ITER_IOVEC = 1, + ITER_BVEC = 2, + ITER_KVEC = 3, + ITER_XARRAY = 4, + ITER_DISCARD = 5, +}; + +enum { + _DQUOT_USAGE_ENABLED = 0, + _DQUOT_LIMITS_ENABLED = 1, + _DQUOT_SUSPENDED = 2, + _DQUOT_STATE_FLAGS = 3, +}; + +struct shared_policy { + struct rb_root root; + rwlock_t lock; +}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + unsigned long flags; + unsigned long alloced; + unsigned long swapped; + union { + struct offset_ctx dir_offsets; + struct { + struct list_head shrinklist; + struct list_head swaplist; + }; + }; + struct timespec64 i_crtime; + struct shared_policy policy; + struct simple_xattrs xattrs; + unsigned long fallocend; + unsigned int fsflags; + atomic_t stop_eviction; + struct inode vfs_inode; +}; + +typedef unsigned int uffd_flags_t; + +struct shmem_quota_limits { + qsize_t usrquota_bhardlimit; + qsize_t usrquota_ihardlimit; + qsize_t grpquota_bhardlimit; + qsize_t grpquota_ihardlimit; +}; + +struct shmem_sb_info { + unsigned long max_blocks; + struct percpu_counter used_blocks; + unsigned long max_inodes; + unsigned long free_ispace; + raw_spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + bool noswap; + ino_t next_ino; + ino_t __attribute__((btf_type_tag("percpu"))) * ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + unsigned long shrinklist_len; + struct shmem_quota_limits qlimits; +}; + +typedef __u64 __le64; + +struct simple_xattr { + struct rb_node rb_node; + char *name; + size_t size; + char value[0]; +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + unsigned long start; + unsigned long next; + unsigned long nr_falloced; + unsigned long nr_unswapped; +}; + +struct shmem_options { + unsigned long long blocks; + unsigned long long inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; + bool noswap; + unsigned short quota_types; + struct shmem_quota_limits qlimits; +}; + +struct vm_event_state { + unsigned long event[108]; +}; + +enum numa_stat_item { + NUMA_HIT = 0, + NUMA_MISS = 1, + NUMA_FOREIGN = 2, + NUMA_INTERLEAVE_HIT = 3, + NUMA_LOCAL = 4, + NUMA_OTHER = 5, + NR_VM_NUMA_EVENT_ITEMS = 6, +}; + +enum writeback_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_VM_WRITEBACK_STAT_ITEMS = 2, +}; + +struct contig_page_info { + unsigned long free_pages; + unsigned long 
free_blocks_total; + unsigned long free_blocks_suitable; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; + struct xa_node *node; +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkcg_gq *blkg; + struct llist_node lnode; + int lqueued; + struct blkg_iostat cur; + struct blkg_iostat last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set __attribute__((btf_type_tag("percpu"))) * iostat_cpu; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[6]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; +}; + +struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + struct xarray blkg_tree; + struct blkcg_gq __attribute__((btf_type_tag("rcu"))) * blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[6]; + struct list_head all_blkcgs_node; + struct llist_head __attribute__((btf_type_tag("percpu"))) * lhead; + struct list_head cgwb_list; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; + bool online; +}; + +struct mminit_pfnnid_cache { + unsigned long last_start; + unsigned long last_end; + int last_nid; +}; + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, unsigned long, bool, bool, size_t, + size_t, void *, int, + void __attribute__((btf_type_tag("percpu"))) *, + size_t, gfp_t); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, + void __attribute__((btf_type_tag("percpu"))) *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + unsigned long *bound_map; + void *base_addr; + unsigned long *alloc_map; + struct pcpu_block_md *md_blocks; + void *data; + bool immutable; + bool isolated; + int start_offset; + int end_offset; + struct obj_cgroup **obj_cgroups; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + unsigned long populated[0]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +enum memcg_stat_item { + MEMCG_SWAP = 41, + MEMCG_SOCK = 42, + MEMCG_PERCPU_B = 43, + MEMCG_VMALLOC = 44, + MEMCG_KMEM = 45, + MEMCG_ZSWAP_B = 46, + MEMCG_ZSWAPPED = 47, + MEMCG_NR_STAT = 48, +}; + +struct trace_event_raw_percpu_alloc_percpu { + struct 
trace_entry ent; + unsigned long call_site; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void __attribute__((btf_type_tag("percpu"))) * ptr; + size_t bytes_alloc; + unsigned long gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void __attribute__((btf_type_tag("percpu"))) * ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct pcpu_group_info { + int nr_units; + unsigned long base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_kmem_cache_alloc)(void *, unsigned long, const void *, + struct kmem_cache *, gfp_t, int); + +typedef void (*btf_trace_kmalloc)(void *, unsigned long, const void *, size_t, size_t, + gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, unsigned long, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, unsigned long, const void *, + const struct kmem_cache *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, + int, int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); + +struct kmalloc_info_struct { + const char *name[19]; + unsigned int size; +}; + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + PARTIAL_NODE = 2, + UP = 3, + FULL = 4, +}; + +struct trace_event_raw_kmem_cache_alloc { + struct trace_entry ent; + unsigned long call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + unsigned long gfp_flags; + int node; + bool accounted; + char __data[0]; +}; + +struct trace_event_raw_kmalloc { + struct trace_entry ent; + unsigned long call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + unsigned long gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kfree { + struct trace_entry ent; + unsigned long call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_kmem_cache_free { + struct trace_entry ent; + unsigned long call_site; + const void *ptr; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + unsigned long pfn; + unsigned int order; + char __data[0]; +}; + +struct 
trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + unsigned long pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + unsigned long pfn; + unsigned int order; + unsigned long gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + unsigned long pfn; + unsigned int order; + int migratetype; + int percpu_refill; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + unsigned long pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + unsigned long pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int change_ownership; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_cache_free { + u32 name; +}; + +struct kmem_obj_info { + void *kp_ptr; + struct slab *kp_slab; + void *kp_objp; + unsigned long kp_data_offset; + struct kmem_cache *kp_slab_cache; + void *kp_ret; + void *kp_stack[16]; + void *kp_free_stack[16]; +}; + +struct slabinfo { + unsigned long active_objs; + unsigned long num_objs; + unsigned long active_slabs; + unsigned long num_slabs; + unsigned long shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +struct trace_event_data_offsets_kmem_cache_alloc {}; + +struct trace_event_data_offsets_kmalloc {}; + +struct trace_event_data_offsets_kfree {}; + +struct trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, unsigned long, + unsigned long, unsigned long, + unsigned long); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, unsigned long, unsigned long, + unsigned long, unsigned long); + +typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, unsigned long, + unsigned long, unsigned long, + unsigned long); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, struct compact_control *, + unsigned int); + +typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, + unsigned long, unsigned long, bool); + +typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, + unsigned long, unsigned long, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum 
zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +typedef unsigned int isolate_mode_t; + +struct trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + unsigned long start_pfn; + unsigned long end_pfn; + unsigned long nr_scanned; + unsigned long nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + unsigned long nr_migrated; + unsigned long nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + unsigned long zone_start; + unsigned long migrate_pfn; + unsigned long free_pfn; + unsigned long zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + unsigned long zone_start; + unsigned long migrate_pfn; + unsigned long free_pfn; + unsigned long zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + unsigned long gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *, struct page *, enum migrate_mode); + void (*putback_page)(struct page *); +}; + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +struct node { + struct device dev; + struct list_head access_list; + struct list_head cache_attrs; + struct device *cache_dev; +}; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + unsigned long rb_subtree_last; +}; + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY = 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, +}; + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one node[0]; +}; + +typedef enum 
lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, + spinlock_t *, void *); + +struct list_lru_memcg_table { + struct list_lru_memcg *mlru; + struct mem_cgroup *memcg; +}; + +enum { + FOLL_TOUCH = 65536, + FOLL_TRIED = 131072, + FOLL_REMOTE = 262144, + FOLL_PIN = 524288, + FOLL_FAST_ONLY = 1048576, + FOLL_UNLOCKABLE = 2097152, +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, + const char *, bool); + +typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, + const char *, bool, bool); + +struct memcg_path { + local_lock_t lock; + char __attribute__((btf_type_tag("rcu"))) * buf; + local_t buf_idx; +}; + +struct trace_event_raw_mmap_lock { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_acquire_returned { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + bool success; + char __data[0]; +}; + +struct trace_event_data_offsets_mmap_lock { + u32 memcg_path; +}; + +struct trace_event_data_offsets_mmap_lock_acquire_returned { + u32 memcg_path; +}; + +typedef int rmap_t; + +typedef unsigned long pte_marker; + +typedef int (*pte_fn_t)(pte_t *, unsigned long, void *); + +typedef unsigned int pgtbl_mod_mask; + +struct copy_subpage_arg { + struct page *dst; + struct page *src; + struct vm_area_struct *vma; +}; + +struct mlock_fbatch { + local_lock_t lock; + struct folio_batch fbatch; +}; + +struct anon_vma_name { + struct kref kref; + char name[0]; +}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, unsigned long, + struct vm_unmapped_area_info *); + +typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, unsigned long, + unsigned long); + +typedef void (*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); + +typedef void (*btf_trace_exit_mmap)(void *, struct mm_struct *); + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + unsigned long addr; + unsigned long total_vm; + unsigned long flags; + unsigned long length; + unsigned long low_limit; + unsigned long high_limit; + unsigned long align_mask; + unsigned long align_offset; + char __data[0]; +}; + +struct trace_event_raw_vma_mas_szero { + struct trace_entry ent; + struct maple_tree *mt; + unsigned long start; + unsigned long end; + char __data[0]; +}; + +struct trace_event_raw_vma_store { + struct trace_entry ent; + struct maple_tree *mt; + struct vm_area_struct *vma; + unsigned long vm_start; + unsigned long vm_end; + char __data[0]; +}; + +struct trace_event_raw_exit_mmap { + struct trace_entry ent; + struct mm_struct *mm; + struct maple_tree *mt; + char __data[0]; +}; + +struct vma_prepare { + struct vm_area_struct *vma; + struct vm_area_struct *adj_next; + struct file *file; + struct address_space *mapping; + struct anon_vma *anon_vma; + struct vm_area_struct *insert; + struct vm_area_struct *remove; + struct vm_area_struct *remove2; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +struct trace_event_data_offsets_vma_mas_szero {}; + +struct trace_event_data_offsets_vma_store {}; + +struct trace_event_data_offsets_exit_mmap {}; + +enum pgt_entry { + NORMAL_PMD = 0, + HPAGE_PMD = 1, + 
NORMAL_PUD = 2, + HPAGE_PUD = 3, +}; + +typedef void (*btf_trace_tlb_flush)(void *, int, unsigned long); + +typedef void (*btf_trace_mm_migrate_pages)(void *, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, enum migrate_mode, int); + +typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); + +typedef void (*btf_trace_set_migration_pte)(void *, unsigned long, unsigned long, int); + +typedef void (*btf_trace_remove_migration_pte)(void *, unsigned long, unsigned long, int); + +enum hugetlb_page_flags { + HPG_restore_reserve = 0, + HPG_migratable = 1, + HPG_temporary = 2, + HPG_freed = 3, + HPG_vmemmap_optimized = 4, + HPG_raw_hwp_unreliable = 5, + __NR_HPAGEFLAGS = 6, +}; + +struct trace_event_raw_tlb_flush { + struct trace_entry ent; + int reason; + unsigned long pages; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + unsigned long succeeded; + unsigned long failed; + unsigned long thp_succeeded; + unsigned long thp_failed; + unsigned long thp_split; + unsigned long large_folio_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages_start { + struct trace_entry ent; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_migration_pte { + struct trace_entry ent; + unsigned long addr; + unsigned long pte; + int order; + char __data[0]; +}; + +struct rmap_walk_control { + void *arg; + bool try_lock; + bool contended; + bool (*rmap_one)(struct folio *, struct vm_area_struct *, unsigned long, void *); + int (*done)(struct folio *); + struct anon_vma *(*anon_lock)(struct folio *, struct rmap_walk_control *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct trace_event_data_offsets_tlb_flush {}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +struct trace_event_data_offsets_mm_migrate_pages_start {}; + +struct trace_event_data_offsets_migration_pte {}; + +struct folio_referenced_arg { + int mapcount; + int referenced; + unsigned long vm_flags; + struct mem_cgroup *memcg; +}; + +typedef void (*btf_trace_alloc_vmap_area)(void *, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, int); + +typedef void (*btf_trace_purge_vmap_area_lazy)(void *, unsigned long, unsigned long, + unsigned int); + +typedef void (*btf_trace_free_vmap_area_noflush)(void *, unsigned long, unsigned long, + unsigned long); + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; + struct xarray vmap_blocks; +}; + +struct vmap_area { + unsigned long va_start; + unsigned long va_end; + struct rb_node rb_node; + struct list_head list; + union { + unsigned long subtree_max_size; + struct vm_struct *vm; + }; + unsigned long flags; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +typedef unsigned int kasan_vmalloc_flags_t; + +struct trace_event_raw_alloc_vmap_area { + struct trace_entry ent; + unsigned long addr; + unsigned long size; + unsigned long align; + unsigned long vstart; + unsigned long vend; + int failed; + char __data[0]; +}; + +struct trace_event_raw_purge_vmap_area_lazy { + struct trace_entry ent; + unsigned long start; + unsigned long end; + unsigned int npurged; + char __data[0]; +}; + +struct trace_event_raw_free_vmap_area_noflush { + struct trace_entry ent; + unsigned long va_start; + 
unsigned long nr_lazy; + unsigned long nr_lazy_max; + char __data[0]; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + unsigned long free; + unsigned long dirty; + unsigned long used_map[16]; + unsigned long dirty_min; + unsigned long dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; +}; + +struct trace_event_data_offsets_alloc_vmap_area {}; + +struct trace_event_data_offsets_purge_vmap_area_lazy {}; + +struct trace_event_data_offsets_free_vmap_area_noflush {}; + +typedef int fpi_t; + +struct page_frag_cache { + void *va; + __u16 offset; + __u16 size; + unsigned int pagecnt_bias; + bool pfmemalloc; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +typedef void (*online_page_callback_t)(struct page *, unsigned int); + +enum { + MMOP_OFFLINE = 0, + MMOP_ONLINE = 1, + MMOP_ONLINE_KERNEL = 2, + MMOP_ONLINE_MOVABLE = 3, +}; + +enum { + ONLINE_POLICY_CONTIG_ZONES = 0, + ONLINE_POLICY_AUTO_MOVABLE = 1, +}; + +enum { + MEMMAP_ON_MEMORY_DISABLE = 0, + MEMMAP_ON_MEMORY_ENABLE = 1, + MEMMAP_ON_MEMORY_FORCE = 2, +}; + +typedef int mhp_t; + +struct memory_group; + +struct memory_block { + unsigned long start_section_nr; + unsigned long state; + int online_type; + int nid; + struct zone *zone; + struct device dev; + struct vmem_altmap *altmap; + struct memory_group *group; + struct list_head group_next; + atomic_long_t nr_hwpoison; +}; + +struct memory_group { + int nid; + struct list_head memory_blocks; + unsigned long present_kernel_pages; + unsigned long present_movable_pages; + bool is_dynamic; + union { + struct { + unsigned long max_pages; + } s; + struct { + unsigned long unit_pages; + } d; + }; +}; + +struct memory_notify { + unsigned long start_pfn; + unsigned long nr_pages; + int status_change_nid_normal; + int status_change_nid; +}; + +typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); + +struct auto_movable_stats { + unsigned long kernel_early_pages; + unsigned long movable_pages; +}; + +typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *); + +struct auto_movable_group_stats { + unsigned long movable_pages; + unsigned long req_kernel_early_pages; +}; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool pageout; +}; + +struct swap_iocb { + struct kiocb iocb; + struct bio_vec bvec[32]; + int pages; + int len; +}; + +struct vma_swap_readahead { + unsigned short win; + unsigned short offset; + unsigned short nr_pte; +}; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +struct swap_extent { + struct rb_node rb_node; + unsigned long start_page; + unsigned long nr_pages; + sector_t start_block; +}; + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +struct zswap_tree { + struct rb_root rbroot; + spinlock_t lock; +}; + +enum zswap_init_type { + ZSWAP_UNINIT = 0, + ZSWAP_INIT_SUCCEED = 1, + ZSWAP_INIT_FAILED = 2, +}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 
2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +struct crypto_acomp; + +struct acomp_req; + +struct crypto_acomp_ctx { + struct crypto_acomp *acomp; + struct acomp_req *req; + struct crypto_wait wait; + u8 *dstmem; + struct mutex *mutex; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + struct crypto_tfm base; +}; + +typedef void (*crypto_completion_t)(void *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[0]; +}; + +struct zswap_pool; + +struct zswap_entry { + struct rb_node rbnode; + swp_entry_t swpentry; + int refcount; + unsigned int length; + struct zswap_pool *pool; + union { + unsigned long handle; + unsigned long value; + }; + struct obj_cgroup *objcg; + struct list_head lru; +}; + +struct zpool; + +struct zswap_pool { + struct zpool *zpools[32]; + struct crypto_acomp_ctx __attribute__((btf_type_tag("percpu"))) * acomp_ctx; + struct kref kref; + struct list_head list; + struct work_struct release_work; + struct work_struct shrink_work; + struct hlist_node node; + char tfm_name[128]; + struct list_head lru; + spinlock_t lru_lock; +}; + +struct comp_alg_common { + struct crypto_alg base; +}; + +struct zpool_driver; + +struct zpool { + struct zpool_driver *driver; + void *pool; +}; + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void *(*create)(const char *, gfp_t); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, unsigned long *); + void (*free)(void *, unsigned long); + bool sleep_mapped; + void *(*map)(void *, unsigned long, enum zpool_mapmode); + void (*unmap)(void *, unsigned long); + u64 (*total_size)(void *); +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; +}; + +struct dma_block; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + struct dma_block *next_block; + size_t nr_blocks; + size_t nr_active; + size_t nr_pages; + struct device *dev; + unsigned int size; + unsigned int allocation; + unsigned int boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_block { + struct dma_block *next_block; + dma_addr_t dma; +}; + +struct node_hstate { + struct kobject *hugepages_kobj; + struct kobject *hstate_kobjs[2]; +}; + +enum vma_resv_mode { + VMA_NEEDS_RESV = 0, + VMA_COMMIT_RESV = 1, + VMA_END_RESV = 2, + VMA_ADD_RESV = 3, + VMA_DEL_RESV = 4, +}; + +enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, +}; + +struct hugetlb_vma_lock { + struct kref refs; + struct rw_semaphore rw_sema; + struct vm_area_struct *vma; +}; + +struct resv_map { + struct kref refs; + spinlock_t lock; + struct list_head regions; + long adds_in_progress; + struct list_head region_cache; + long region_cache_count; + struct rw_semaphore rw_sema; + struct page_counter *reservation_counter; + unsigned long pages_per_hpage; + struct cgroup_subsys_state *css; +}; + +struct file_region { + struct list_head link; + long from; + long to; + struct page_counter *reservation_counter; + struct cgroup_subsys_state *css; +}; + +struct huge_bootmem_page { + 
struct list_head list; + struct hstate *hstate; +}; + +struct hugetlb_cgroup_per_node; + +struct hugetlb_cgroup { + struct cgroup_subsys_state css; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct page_counter hugepage[2]; + struct page_counter rsvd_hugepage[2]; + atomic_long_t events[2]; + atomic_long_t events_local[2]; + struct cgroup_file events_file[2]; + struct cgroup_file events_local_file[2]; + struct hugetlb_cgroup_per_node *nodeinfo[0]; +}; + +struct hugetlb_cgroup_per_node { + unsigned long usage[2]; +}; + +struct vmemmap_remap_walk { + void (*remap_pte)(pte_t *, unsigned long, struct vmemmap_remap_walk *); + unsigned long nr_walked; + struct page *reuse_page; + unsigned long reuse_addr; + struct list_head *vmemmap_pages; + unsigned long flags; +}; + +struct mempolicy_operations { + int (*create)(struct mempolicy *, const nodemask_t *); + void (*rebind)(struct mempolicy *, const nodemask_t *); +}; + +struct sp_node { + struct rb_node nd; + unsigned long start; + unsigned long end; + struct mempolicy *policy; +}; + +struct migration_mpol { + struct mempolicy *pol; + unsigned long ilx; +}; + +struct queue_pages { + struct list_head *pagelist; + unsigned long flags; + nodemask_t *nmask; + unsigned long start; + unsigned long end; + struct vm_area_struct *first; + struct folio *large; + long nr_failed; +}; + +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; + +struct mmu_notifier_subscriptions { + struct hlist_head list; + bool has_itree; + spinlock_t lock; + unsigned long invalidate_seq; + unsigned long active_invalidate_ranges; + struct rb_root_cached itree; + wait_queue_head_t wq; + struct hlist_head deferred_list; +}; + +struct mmu_interval_notifier_ops; + +struct mmu_interval_notifier { + struct interval_tree_node interval_tree; + const struct mmu_interval_notifier_ops *ops; + struct mm_struct *mm; + struct hlist_node deferred_item; + unsigned long invalidate_seq; +}; + +struct mmu_interval_notifier_ops { + bool (*invalidate)(struct mmu_interval_notifier *, + const struct mmu_notifier_range *, unsigned long); +}; + +typedef void (*btf_trace_ksm_start_scan)(void *, int, u32); + +typedef void (*btf_trace_ksm_stop_scan)(void *, int, u32); + +typedef void (*btf_trace_ksm_enter)(void *, void *); + +typedef void (*btf_trace_ksm_exit)(void *, void *); + +typedef void (*btf_trace_ksm_merge_one_page)(void *, unsigned long, void *, void *, int); + +typedef void (*btf_trace_ksm_merge_with_ksm_page)(void *, void *, unsigned long, void *, + void *, int); + +typedef void (*btf_trace_ksm_remove_ksm_page)(void *, unsigned long); + +typedef void (*btf_trace_ksm_remove_rmap_item)(void *, unsigned long, void *, void *); + +struct mm_slot { + struct hlist_node hash; + struct list_head mm_node; + struct mm_struct *mm; +}; + +struct ksm_rmap_item; + +struct ksm_mm_slot { + struct mm_slot slot; + struct ksm_rmap_item *rmap_list; +}; + +typedef u8 rmap_age_t; + +struct ksm_stable_node; + +struct ksm_rmap_item { + struct ksm_rmap_item *rmap_list; + union { + struct anon_vma *anon_vma; + int nid; + }; + struct mm_struct *mm; + unsigned long address; + unsigned int oldchecksum; + rmap_age_t age; + rmap_age_t remaining_skips; + union { + struct rb_node node; + struct { + struct ksm_stable_node *head; + struct hlist_node hlist; + }; + }; +}; + +struct ksm_stable_node { + union { + struct rb_node node; + struct { + struct list_head *head; + struct { + struct hlist_node hlist_dup; + struct list_head list; + }; + }; + }; + struct 
hlist_head hlist; + union { + unsigned long kpfn; + unsigned long chain_prune_time; + }; + int rmap_hlist_len; + int nid; +}; + +struct ksm_scan { + struct ksm_mm_slot *mm_slot; + unsigned long address; + struct ksm_rmap_item **rmap_list; + unsigned long seqnr; +}; + +enum get_ksm_page_flags { + GET_KSM_PAGE_NOLOCK = 0, + GET_KSM_PAGE_LOCK = 1, + GET_KSM_PAGE_TRYLOCK = 2, +}; + +struct trace_event_raw_ksm_scan_template { + struct trace_entry ent; + int seq; + u32 rmap_entries; + char __data[0]; +}; + +struct trace_event_raw_ksm_enter_exit_template { + struct trace_entry ent; + void *mm; + char __data[0]; +}; + +struct trace_event_raw_ksm_merge_one_page { + struct trace_entry ent; + unsigned long pfn; + void *rmap_item; + void *mm; + int err; + char __data[0]; +}; + +struct trace_event_raw_ksm_merge_with_ksm_page { + struct trace_entry ent; + void *ksm_page; + unsigned long pfn; + void *rmap_item; + void *mm; + int err; + char __data[0]; +}; + +struct trace_event_raw_ksm_remove_ksm_page { + struct trace_entry ent; + unsigned long pfn; + char __data[0]; +}; + +struct trace_event_raw_ksm_remove_rmap_item { + struct trace_entry ent; + unsigned long pfn; + void *rmap_item; + void *mm; + char __data[0]; +}; + +struct trace_event_data_offsets_ksm_scan_template {}; + +struct trace_event_data_offsets_ksm_enter_exit_template {}; + +struct trace_event_data_offsets_ksm_merge_one_page {}; + +struct trace_event_data_offsets_ksm_merge_with_ksm_page {}; + +struct trace_event_data_offsets_ksm_remove_ksm_page {}; + +struct trace_event_data_offsets_ksm_remove_rmap_item {}; + +struct slub_flush_work { + struct work_struct work; + struct kmem_cache *s; + bool skip; +}; + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *, char *); + ssize_t (*store)(struct kmem_cache *, const char *, size_t); +}; + +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +enum track_item { + TRACK_ALLOC = 0, + TRACK_FREE = 1, +}; + +enum stat_item { + ALLOC_FASTPATH = 0, + ALLOC_SLOWPATH = 1, + FREE_FASTPATH = 2, + FREE_SLOWPATH = 3, + FREE_FROZEN = 4, + FREE_ADD_PARTIAL = 5, + FREE_REMOVE_PARTIAL = 6, + ALLOC_FROM_PARTIAL = 7, + ALLOC_SLAB = 8, + ALLOC_REFILL = 9, + ALLOC_NODE_MISMATCH = 10, + FREE_SLAB = 11, + CPUSLAB_FLUSH = 12, + DEACTIVATE_FULL = 13, + DEACTIVATE_EMPTY = 14, + DEACTIVATE_TO_HEAD = 15, + DEACTIVATE_TO_TAIL = 16, + DEACTIVATE_REMOTE_FREES = 17, + DEACTIVATE_BYPASS = 18, + ORDER_FALLBACK = 19, + CMPXCHG_DOUBLE_CPU_FAIL = 20, + CMPXCHG_DOUBLE_FAIL = 21, + CPU_PARTIAL_ALLOC = 22, + CPU_PARTIAL_FREE = 23, + CPU_PARTIAL_NODE = 24, + CPU_PARTIAL_DRAIN = 25, + NR_SLUB_STAT_ITEMS = 26, +}; + +enum slab_modes { + M_NONE = 0, + M_PARTIAL = 1, + M_FREE = 2, + M_FULL_NOLIST = 3, +}; + +enum slab_stat_type { + SL_ALL = 0, + SL_PARTIAL = 1, + SL_CPU = 2, + SL_OBJECTS = 3, + SL_TOTAL = 4, +}; + +typedef struct { + unsigned long v; +} freeptr_t; + +typedef u32 depot_stack_handle_t; + +struct location { + depot_stack_handle_t handle; + unsigned long count; + unsigned long addr; + unsigned long waste; + long long sum_time; + long min_time; + long max_time; + long min_pid; + long max_pid; + unsigned long cpus[1]; + nodemask_t nodes; +}; + +struct track { + unsigned long addr; + depot_stack_handle_t handle; + int cpu; + int pid; + unsigned long when; +}; + +struct detached_freelist { + struct slab *slab; + void *tail; + void *freelist; + int cnt; + struct kmem_cache *s; +}; + +union __u128_halves { + u128 full; + struct { + u64 low; + u64 high; + 
}; +}; + +struct partial_context { + struct slab **slab; + gfp_t flags; + unsigned int orig_size; +}; + +struct loc_track { + unsigned long max; + unsigned long count; + struct location *loc; + loff_t idx; +}; + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +enum { + PAGE_WAS_MAPPED = 1, + PAGE_WAS_MLOCKED = 2, + PAGE_OLD_STATES = 3, +}; + +struct buffer_head; + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + unsigned long b_state; + struct buffer_head *b_this_page; + union { + struct page *b_page; + struct folio *b_folio; + }; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + bh_end_io_t *b_end_io; + void *b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +struct migrate_pages_stats { + int nr_succeeded; + int nr_failed_pages; + int nr_thp_succeeded; + int nr_thp_failed; + int nr_thp_split; + int nr_split; +}; + +struct memory_dev_type; + +struct node_memory_type_map { + struct memory_dev_type *memtype; + int map_count; +}; + +struct memory_dev_type { + struct list_head tier_sibling; + struct list_head list; + int adistance; + nodemask_t nodes; + struct kref kref; +}; + +struct demotion_nodes { + nodemask_t preferred; +}; + +struct node_hmem_attrs { + unsigned int read_bandwidth; + unsigned int write_bandwidth; + unsigned int read_latency; + unsigned int write_latency; +}; + +enum migrate_vma_direction { + MIGRATE_VMA_SELECT_SYSTEM = 1, + MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 2, + MIGRATE_VMA_SELECT_DEVICE_COHERENT = 4, +}; + +struct migrate_vma { + struct vm_area_struct *vma; + unsigned long *dst; + unsigned long *src; + unsigned long cpages; + unsigned long npages; + unsigned long start; + unsigned long end; + void *pgmap_owner; + unsigned long flags; + struct page *fault_page; +}; + +typedef void (*btf_trace_hugepage_set_pmd)(void *, unsigned long, unsigned long); + +typedef void (*btf_trace_hugepage_set_pud)(void *, unsigned long, unsigned long); + +typedef void (*btf_trace_hugepage_update_pmd)(void *, unsigned long, unsigned long, + unsigned long, unsigned long); + +typedef void (*btf_trace_hugepage_update_pud)(void *, unsigned long, unsigned long, + unsigned long, unsigned long); + +typedef void (*btf_trace_set_migration_pmd)(void *, unsigned long, unsigned long); + +typedef void (*btf_trace_remove_migration_pmd)(void *, unsigned long, unsigned long); + +enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_UNSUPPORTED = 0, + TRANSPARENT_HUGEPAGE_FLAG = 1, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 2, + TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 3, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 4, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 5, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 6, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 7, + TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 8, +}; + +struct trace_event_raw_hugepage_set { + struct trace_entry ent; + unsigned long addr; + unsigned long pte; + char __data[0]; +}; + +struct trace_event_raw_hugepage_update { + struct trace_entry ent; + unsigned long addr; + unsigned long pte; + unsigned long clr; + unsigned long set; + char __data[0]; +}; + +struct trace_event_raw_migration_pmd 
{ + struct trace_entry ent; + unsigned long addr; + unsigned long pmd; + char __data[0]; +}; + +struct trace_event_data_offsets_hugepage_set {}; + +struct trace_event_data_offsets_hugepage_update {}; + +struct trace_event_data_offsets_migration_pmd {}; + +typedef void (*btf_trace_mm_khugepaged_scan_pmd)(void *, struct mm_struct *, + struct page *, bool, int, int, int, int); + +typedef void (*btf_trace_mm_collapse_huge_page)(void *, struct mm_struct *, int, int); + +typedef void (*btf_trace_mm_collapse_huge_page_isolate)(void *, struct page *, int, int, + bool, int); + +typedef void (*btf_trace_mm_collapse_huge_page_swapin)(void *, struct mm_struct *, int, + int, int); + +typedef void (*btf_trace_mm_khugepaged_scan_file)(void *, struct mm_struct *, struct page *, + struct file *, int, int, int); + +typedef void (*btf_trace_mm_khugepaged_collapse_file)(void *, struct mm_struct *, + struct page *, unsigned long, bool, + unsigned long, struct file *, int, int); + +struct collapse_control { + bool is_khugepaged; + u32 node_load[64]; + nodemask_t alloc_nmask; +}; + +struct khugepaged_mm_slot; + +struct khugepaged_scan { + struct list_head mm_head; + struct khugepaged_mm_slot *mm_slot; + unsigned long address; +}; + +struct khugepaged_mm_slot { + struct mm_slot slot; +}; + +enum scan_result { + SCAN_FAIL = 0, + SCAN_SUCCEED = 1, + SCAN_PMD_NULL = 2, + SCAN_PMD_NONE = 3, + SCAN_PMD_MAPPED = 4, + SCAN_EXCEED_NONE_PTE = 5, + SCAN_EXCEED_SWAP_PTE = 6, + SCAN_EXCEED_SHARED_PTE = 7, + SCAN_PTE_NON_PRESENT = 8, + SCAN_PTE_UFFD_WP = 9, + SCAN_PTE_MAPPED_HUGEPAGE = 10, + SCAN_PAGE_RO = 11, + SCAN_LACK_REFERENCED_PAGE = 12, + SCAN_PAGE_NULL = 13, + SCAN_SCAN_ABORT = 14, + SCAN_PAGE_COUNT = 15, + SCAN_PAGE_LRU = 16, + SCAN_PAGE_LOCK = 17, + SCAN_PAGE_ANON = 18, + SCAN_PAGE_COMPOUND = 19, + SCAN_ANY_PROCESS = 20, + SCAN_VMA_NULL = 21, + SCAN_VMA_CHECK = 22, + SCAN_ADDRESS_RANGE = 23, + SCAN_DEL_PAGE_LRU = 24, + SCAN_ALLOC_HUGE_PAGE_FAIL = 25, + SCAN_CGROUP_CHARGE_FAIL = 26, + SCAN_TRUNCATED = 27, + SCAN_PAGE_HAS_PRIVATE = 28, + SCAN_STORE_FAILED = 29, + SCAN_COPY_MC = 30, + SCAN_PAGE_FILLED = 31, +}; + +struct trace_event_raw_mm_khugepaged_scan_pmd { + struct trace_entry ent; + struct mm_struct *mm; + unsigned long pfn; + bool writable; + int referenced; + int none_or_zero; + int status; + int unmapped; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page { + struct trace_entry ent; + struct mm_struct *mm; + int isolated; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page_isolate { + struct trace_entry ent; + unsigned long pfn; + int none_or_zero; + int referenced; + bool writable; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page_swapin { + struct trace_entry ent; + struct mm_struct *mm; + int swapped_in; + int referenced; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_khugepaged_scan_file { + struct trace_entry ent; + struct mm_struct *mm; + unsigned long pfn; + u32 __data_loc_filename; + int present; + int swap; + int result; + char __data[0]; +}; + +struct trace_event_raw_mm_khugepaged_collapse_file { + struct trace_entry ent; + struct mm_struct *mm; + unsigned long hpfn; + unsigned long index; + unsigned long addr; + bool is_shmem; + u32 __data_loc_filename; + int nr; + int result; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_khugepaged_scan_file { + u32 filename; +}; + +struct trace_event_data_offsets_mm_khugepaged_collapse_file { + u32 filename; +}; + +struct 
trace_event_data_offsets_mm_khugepaged_scan_pmd {}; + +struct trace_event_data_offsets_mm_collapse_huge_page {}; + +struct trace_event_data_offsets_mm_collapse_huge_page_isolate {}; + +struct trace_event_data_offsets_mm_collapse_huge_page_swapin {}; + +struct mem_cgroup_tree_per_node; + +struct mem_cgroup_tree { + struct mem_cgroup_tree_per_node *rb_tree_per_node[64]; +}; + +struct mem_cgroup_tree_per_node { + struct rb_root rb_root; + struct rb_node *rb_rightmost; + spinlock_t lock; +}; + +struct memory_stat { + const char *name; + unsigned int idx; +}; + +struct move_charge_struct { + spinlock_t lock; + struct mm_struct *mm; + struct mem_cgroup *from; + struct mem_cgroup *to; + unsigned long flags; + unsigned long precharge; + unsigned long moved_charge; + unsigned long moved_swap; + struct task_struct *moving_task; + wait_queue_head_t waitq; +}; + +struct memcg_stock_pcp { + local_lock_t stock_lock; + struct mem_cgroup *cached; + unsigned int nr_pages; + struct obj_cgroup *cached_objcg; + struct pglist_data *cached_pgdat; + unsigned int nr_bytes; + int nr_slab_reclaimable_b; + int nr_slab_unreclaimable_b; + struct work_struct work; + unsigned long flags; +}; + +struct numa_stat { + const char *name; + unsigned int lru_mask; +}; + +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH = 0, + MEM_CGROUP_TARGET_SOFTLIMIT = 1, + MEM_CGROUP_NTARGETS = 2, +}; + +enum mc_target_type { + MC_TARGET_NONE = 0, + MC_TARGET_PAGE = 1, + MC_TARGET_SWAP = 2, + MC_TARGET_DEVICE = 3, +}; + +enum res_type { + _MEM = 0, + _MEMSWAP = 1, + _KMEM = 2, + _TCP = 3, +}; + +enum { + RES_USAGE = 0, + RES_LIMIT = 1, + RES_MAX_USAGE = 2, + RES_FAILCNT = 3, + RES_SOFT_LIMIT = 4, +}; + +struct oom_wait_info { + struct mem_cgroup *memcg; + wait_queue_entry_t wait; +}; + +struct mem_cgroup_eventfd_list { + struct list_head list; + struct eventfd_ctx *eventfd; +}; + +struct mem_cgroup_event { + struct mem_cgroup *memcg; + struct eventfd_ctx *eventfd; + struct list_head list; + int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *); + void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *); + poll_table pt; + wait_queue_head_t *wqh; + wait_queue_entry_t wait; + struct work_struct remove; +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + unsigned long nr_memory; + unsigned long pgpgout; + unsigned long nr_kmem; + int nid; +}; + +union mc_target { + struct page *page; + swp_entry_t ent; +}; + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +struct swap_cgroup_ctrl { + struct page **map; + unsigned long length; + spinlock_t lock; +}; + +struct swap_cgroup { + unsigned short id; +}; + +enum hugetlb_memory_event { + HUGETLB_MAX = 0, + HUGETLB_NR_MEMORY_EVENTS = 1, +}; + +enum { + RES_USAGE___2 = 0, + RES_RSVD_USAGE = 1, + RES_LIMIT___2 = 2, + RES_RSVD_LIMIT = 3, + RES_MAX_USAGE___2 = 4, + RES_RSVD_MAX_USAGE = 5, + RES_FAILCNT___2 = 6, + RES_RSVD_FAILCNT = 7, +}; + +struct memory_failure_entry { + unsigned long pfn; + int flags; +}; + +struct memory_failure_cpu { + struct { + union { + struct __kfifo kfifo; + struct memory_failure_entry *type; + const struct memory_failure_entry *const_type; + char 
(*rectype)[0]; + struct memory_failure_entry *ptr; + const struct memory_failure_entry *ptr_const; + }; + struct memory_failure_entry buf[16]; + } fifo; + spinlock_t lock; + struct work_struct work; +}; + +enum mf_action_page_type { + MF_MSG_KERNEL = 0, + MF_MSG_KERNEL_HIGH_ORDER = 1, + MF_MSG_SLAB = 2, + MF_MSG_DIFFERENT_COMPOUND = 3, + MF_MSG_HUGE = 4, + MF_MSG_FREE_HUGE = 5, + MF_MSG_UNMAP_FAILED = 6, + MF_MSG_DIRTY_SWAPCACHE = 7, + MF_MSG_CLEAN_SWAPCACHE = 8, + MF_MSG_DIRTY_MLOCKED_LRU = 9, + MF_MSG_CLEAN_MLOCKED_LRU = 10, + MF_MSG_DIRTY_UNEVICTABLE_LRU = 11, + MF_MSG_CLEAN_UNEVICTABLE_LRU = 12, + MF_MSG_DIRTY_LRU = 13, + MF_MSG_CLEAN_LRU = 14, + MF_MSG_TRUNCATED_LRU = 15, + MF_MSG_BUDDY = 16, + MF_MSG_DAX = 17, + MF_MSG_UNSPLIT_THP = 18, + MF_MSG_UNKNOWN = 19, +}; + +struct page_state { + unsigned long mask; + unsigned long res; + enum mf_action_page_type type; + int (*action)(struct page_state *, struct page *); +}; + +enum mf_result { + MF_IGNORED = 0, + MF_FAILED = 1, + MF_DELAYED = 2, + MF_RECOVERED = 3, +}; + +struct raw_hwp_page { + struct llist_node node; + struct page *page; +}; + +struct to_kill { + struct list_head nd; + struct task_struct *tsk; + unsigned long addr; + short size_shift; +}; + +typedef unsigned long dax_entry_t; + +struct hwpoison_walk { + struct to_kill tk; + unsigned long pfn; + int flags; +}; + +typedef void (*btf_trace_test_pages_isolated)(void *, unsigned long, unsigned long, + unsigned long); + +struct trace_event_raw_test_pages_isolated { + struct trace_entry ent; + unsigned long start_pfn; + unsigned long end_pfn; + unsigned long fin_pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_test_pages_isolated {}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +struct zbud_header { + struct list_head buddy; + unsigned int first_chunks; + unsigned int last_chunks; +}; + +struct zbud_pool { + spinlock_t lock; + union { + struct list_head buddied; + struct list_head unbuddied[63]; + }; + u64 pages_nr; +}; + +enum zs_mapmode { + ZS_MM_RW = 0, + ZS_MM_RO = 1, + ZS_MM_WO = 2, +}; + +struct mapping_area { + local_lock_t lock; + char *vm_buf; + char *vm_addr; + enum zs_mapmode vm_mm; +}; + +enum class_stat_type { + ZS_OBJS_ALLOCATED = 12, + ZS_OBJS_INUSE = 13, + NR_CLASS_STAT_TYPES = 14, +}; + +enum fullness_group { + ZS_INUSE_RATIO_0 = 0, + ZS_INUSE_RATIO_10 = 1, + ZS_INUSE_RATIO_99 = 10, + ZS_INUSE_RATIO_100 = 11, + NR_FULLNESS_GROUPS = 12, +}; + +struct zs_pool; + +struct zspage { + struct { + unsigned int huge : 1; + unsigned int fullness : 4; + unsigned int class : 9; + unsigned int isolated : 5; + unsigned int magic : 8; + }; + unsigned int inuse; + unsigned int freeobj; + struct page *first_page; + struct list_head list; + struct zs_pool *pool; + rwlock_t lock; +}; + +struct zs_pool_stats { + atomic_long_t pages_compacted; +}; + +struct size_class; + +struct zs_pool { + const char *name; + struct size_class *size_class[255]; + struct kmem_cache *handle_cachep; + struct kmem_cache *zspage_cachep; + atomic_long_t pages_allocated; + struct zs_pool_stats stats; + struct shrinker *shrinker; + struct work_struct free_work; + spinlock_t lock; + atomic_t compaction_in_progress; +}; + +struct zs_size_stat { + unsigned long objs[14]; +}; + +struct size_class { + struct list_head fullness_list[12]; + int size; + int objs_per_zspage; + int pages_per_zspage; + unsigned int index; + struct zs_size_stat stats; +}; + +struct link_free { + union { + unsigned long next; + unsigned long handle; + }; +}; + +enum z3fold_page_flags { + PAGE_HEADLESS = 0, + 
MIDDLE_CHUNK_MAPPED = 1, + NEEDS_COMPACTING = 2, + PAGE_STALE = 3, + PAGE_CLAIMED = 4, + PAGE_MIGRATED = 5, +}; + +enum buddy___2 { + HEADLESS = 0, + FIRST___2 = 1, + MIDDLE = 2, + LAST___2 = 3, + BUDDIES_MAX = 3, +}; + +enum z3fold_handle_flags { + HANDLES_NOFREE = 0, +}; + +struct z3fold_pool { + const char *name; + spinlock_t lock; + spinlock_t stale_lock; + struct list_head *unbuddied; + struct list_head stale; + atomic64_t pages_nr; + struct kmem_cache *c_handle; + struct workqueue_struct *compact_wq; + struct workqueue_struct *release_wq; + struct work_struct work; +}; + +struct z3fold_buddy_slots; + +struct z3fold_header { + struct list_head buddy; + spinlock_t page_lock; + struct kref refcount; + struct work_struct work; + struct z3fold_buddy_slots *slots; + struct z3fold_pool *pool; + short cpu; + unsigned short first_chunks; + unsigned short middle_chunks; + unsigned short last_chunks; + unsigned short start_middle; + unsigned short first_num : 2; + unsigned short mapped_count : 2; + unsigned short foreign_handles : 2; +}; + +struct z3fold_buddy_slots { + unsigned long slot[4]; + unsigned long pool; + rwlock_t lock; +}; + +struct balloon_dev_info { + unsigned long isolated_pages; + spinlock_t pages_lock; + struct list_head pages; + int (*migratepage)(struct balloon_dev_info *, struct page *, struct page *, + enum migrate_mode); +}; + +struct damon_region; + +typedef void (*btf_trace_damos_before_apply)(void *, unsigned int, unsigned int, unsigned int, + struct damon_region *, unsigned int, bool); + +struct damon_addr_range { + unsigned long start; + unsigned long end; +}; + +struct damon_region { + struct damon_addr_range ar; + unsigned long sampling_addr; + unsigned int nr_accesses; + unsigned int nr_accesses_bp; + struct list_head list; + unsigned int age; + unsigned int last_nr_accesses; +}; + +typedef void (*btf_trace_damon_aggregated)(void *, unsigned int, struct damon_region *, + unsigned int); + +enum damon_ops_id { + DAMON_OPS_VADDR = 0, + DAMON_OPS_FVADDR = 1, + DAMON_OPS_PADDR = 2, + NR_DAMON_OPS = 3, +}; + +struct damon_ctx; + +struct damon_target; + +struct damos; + +struct damon_operations { + enum damon_ops_id id; + void (*init)(struct damon_ctx *); + void (*update)(struct damon_ctx *); + void (*prepare_access_checks)(struct damon_ctx *); + unsigned int (*check_accesses)(struct damon_ctx *); + void (*reset_aggregated)(struct damon_ctx *); + int (*get_scheme_score)(struct damon_ctx *, struct damon_target *, + struct damon_region *, struct damos *); + unsigned long (*apply_scheme)(struct damon_ctx *, struct damon_target *, + struct damon_region *, struct damos *); + bool (*target_valid)(struct damon_target *); + void (*cleanup)(struct damon_ctx *); +}; + +struct damon_attrs { + unsigned long sample_interval; + unsigned long aggr_interval; + unsigned long ops_update_interval; + unsigned long min_nr_regions; + unsigned long max_nr_regions; +}; + +struct damon_callback { + void *private; + int (*before_start)(struct damon_ctx *); + int (*after_wmarks_check)(struct damon_ctx *); + int (*after_sampling)(struct damon_ctx *); + int (*after_aggregation)(struct damon_ctx *); + int (*before_damos_apply)(struct damon_ctx *, struct damon_target *, + struct damon_region *, struct damos *); + void (*before_terminate)(struct damon_ctx *); +}; + +struct damon_ctx { + struct damon_attrs attrs; + unsigned long passed_sample_intervals; + unsigned long next_aggregation_sis; + unsigned long next_ops_update_sis; + struct completion kdamond_started; + struct task_struct *kdamond; + struct 
mutex kdamond_lock; + struct damon_operations ops; + struct damon_callback callback; + struct list_head adaptive_targets; + struct list_head schemes; +}; + +struct damon_target { + struct pid *pid; + unsigned int nr_regions; + struct list_head regions_list; + struct list_head list; +}; + +struct damos_access_pattern { + unsigned long min_sz_region; + unsigned long max_sz_region; + unsigned int min_nr_accesses; + unsigned int max_nr_accesses; + unsigned int min_age_region; + unsigned int max_age_region; +}; + +enum damos_action { + DAMOS_WILLNEED = 0, + DAMOS_COLD = 1, + DAMOS_PAGEOUT = 2, + DAMOS_HUGEPAGE = 3, + DAMOS_NOHUGEPAGE = 4, + DAMOS_LRU_PRIO = 5, + DAMOS_LRU_DEPRIO = 6, + DAMOS_STAT = 7, + NR_DAMOS_ACTIONS = 8, +}; + +struct damos_quota { + unsigned long ms; + unsigned long sz; + unsigned long reset_interval; + unsigned int weight_sz; + unsigned int weight_nr_accesses; + unsigned int weight_age; + unsigned long total_charged_sz; + unsigned long total_charged_ns; + unsigned long esz; + unsigned long charged_sz; + unsigned long charged_from; + struct damon_target *charge_target_from; + unsigned long charge_addr_from; + unsigned long histogram[100]; + unsigned int min_score; +}; + +enum damos_wmark_metric { + DAMOS_WMARK_NONE = 0, + DAMOS_WMARK_FREE_MEM_RATE = 1, + NR_DAMOS_WMARK_METRICS = 2, +}; + +struct damos_watermarks { + enum damos_wmark_metric metric; + unsigned long interval; + unsigned long high; + unsigned long mid; + unsigned long low; + bool activated; +}; + +struct damos_stat { + unsigned long nr_tried; + unsigned long sz_tried; + unsigned long nr_applied; + unsigned long sz_applied; + unsigned long qt_exceeds; +}; + +struct damos { + struct damos_access_pattern pattern; + enum damos_action action; + unsigned long apply_interval_us; + unsigned long next_apply_sis; + struct damos_quota quota; + struct damos_watermarks wmarks; + struct list_head filters; + struct damos_stat stat; + struct list_head list; +}; + +enum damos_filter_type { + DAMOS_FILTER_TYPE_ANON = 0, + DAMOS_FILTER_TYPE_MEMCG = 1, + DAMOS_FILTER_TYPE_ADDR = 2, + DAMOS_FILTER_TYPE_TARGET = 3, + NR_DAMOS_FILTER_TYPES = 4, +}; + +struct damos_filter { + enum damos_filter_type type; + bool matching; + union { + unsigned short memcg_id; + struct damon_addr_range addr_range; + int target_idx; + }; + struct list_head list; +}; + +struct trace_event_raw_damos_before_apply { + struct trace_entry ent; + unsigned int context_idx; + unsigned int scheme_idx; + unsigned long target_idx; + unsigned long start; + unsigned long end; + unsigned int nr_accesses; + unsigned int age; + unsigned int nr_regions; + char __data[0]; +}; + +struct trace_event_raw_damon_aggregated { + struct trace_entry ent; + unsigned long target_id; + unsigned int nr_regions; + unsigned long start; + unsigned long end; + unsigned int nr_accesses; + unsigned int age; + char __data[0]; +}; + +struct damon_system_ram_region { + unsigned long start; + unsigned long end; +}; + +struct trace_event_data_offsets_damos_before_apply {}; + +struct trace_event_data_offsets_damon_aggregated {}; + +struct damon_young_walk_private { + unsigned long *folio_sz; + bool young; +}; + +struct damon_sysfs_ul_range { + struct kobject kobj; + unsigned long min; + unsigned long max; +}; + +struct damon_sysfs_scheme; + +struct damon_sysfs_schemes { + struct kobject kobj; + struct damon_sysfs_scheme **schemes_arr; + int nr; +}; + +struct damon_sysfs_access_pattern; + +struct damon_sysfs_quotas; + +struct damon_sysfs_watermarks; + +struct damon_sysfs_scheme_filters; + +struct 
damon_sysfs_stats; + +struct damon_sysfs_scheme_regions; + +struct damon_sysfs_scheme { + struct kobject kobj; + enum damos_action action; + struct damon_sysfs_access_pattern *access_pattern; + unsigned long apply_interval_us; + struct damon_sysfs_quotas *quotas; + struct damon_sysfs_watermarks *watermarks; + struct damon_sysfs_scheme_filters *filters; + struct damon_sysfs_stats *stats; + struct damon_sysfs_scheme_regions *tried_regions; +}; + +struct damon_sysfs_access_pattern { + struct kobject kobj; + struct damon_sysfs_ul_range *sz; + struct damon_sysfs_ul_range *nr_accesses; + struct damon_sysfs_ul_range *age; +}; + +struct damon_sysfs_weights; + +struct damon_sysfs_quotas { + struct kobject kobj; + struct damon_sysfs_weights *weights; + unsigned long ms; + unsigned long sz; + unsigned long reset_interval_ms; +}; + +struct damon_sysfs_weights { + struct kobject kobj; + unsigned int sz; + unsigned int nr_accesses; + unsigned int age; +}; + +struct damon_sysfs_watermarks { + struct kobject kobj; + enum damos_wmark_metric metric; + unsigned long interval_us; + unsigned long high; + unsigned long mid; + unsigned long low; +}; + +struct damon_sysfs_scheme_filter; + +struct damon_sysfs_scheme_filters { + struct kobject kobj; + struct damon_sysfs_scheme_filter **filters_arr; + int nr; +}; + +struct damon_sysfs_scheme_filter { + struct kobject kobj; + enum damos_filter_type type; + bool matching; + char *memcg_path; + struct damon_addr_range addr_range; + int target_idx; +}; + +struct damon_sysfs_stats { + struct kobject kobj; + unsigned long nr_tried; + unsigned long sz_tried; + unsigned long nr_applied; + unsigned long sz_applied; + unsigned long qt_exceeds; +}; + +enum damos_sysfs_regions_upd_status { + DAMOS_TRIED_REGIONS_UPD_IDLE = 0, + DAMOS_TRIED_REGIONS_UPD_STARTED = 1, + DAMOS_TRIED_REGIONS_UPD_FINISHED = 2, +}; + +struct damon_sysfs_scheme_regions { + struct kobject kobj; + struct list_head regions_list; + int nr_regions; + unsigned long total_bytes; + enum damos_sysfs_regions_upd_status upd_status; + unsigned long upd_timeout_jiffies; +}; + +struct damon_sysfs_scheme_region { + struct kobject kobj; + struct damon_addr_range ar; + unsigned int nr_accesses; + unsigned int age; + struct list_head list; +}; + +enum damon_sysfs_cmd { + DAMON_SYSFS_CMD_ON = 0, + DAMON_SYSFS_CMD_OFF = 1, + DAMON_SYSFS_CMD_COMMIT = 2, + DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS = 3, + DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES = 4, + DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS = 5, + DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS = 6, + NR_DAMON_SYSFS_CMDS = 7, +}; + +struct damon_sysfs_kdamond; + +struct damon_sysfs_cmd_request { + enum damon_sysfs_cmd cmd; + struct damon_sysfs_kdamond *kdamond; +}; + +struct damon_sysfs_contexts; + +struct damon_sysfs_kdamond { + struct kobject kobj; + struct damon_sysfs_contexts *contexts; + struct damon_ctx *damon_ctx; +}; + +struct damon_sysfs_context; + +struct damon_sysfs_contexts { + struct kobject kobj; + struct damon_sysfs_context **contexts_arr; + int nr; +}; + +struct damon_sysfs_attrs; + +struct damon_sysfs_targets; + +struct damon_sysfs_context { + struct kobject kobj; + enum damon_ops_id ops_id; + struct damon_sysfs_attrs *attrs; + struct damon_sysfs_targets *targets; + struct damon_sysfs_schemes *schemes; +}; + +struct damon_sysfs_intervals; + +struct damon_sysfs_attrs { + struct kobject kobj; + struct damon_sysfs_intervals *intervals; + struct damon_sysfs_ul_range *nr_regions_range; +}; + +struct damon_sysfs_intervals { + struct kobject kobj; + unsigned long 
sample_us; + unsigned long aggr_us; + unsigned long update_us; +}; + +struct damon_sysfs_target; + +struct damon_sysfs_targets { + struct kobject kobj; + struct damon_sysfs_target **targets_arr; + int nr; +}; + +struct damon_sysfs_regions; + +struct damon_sysfs_target { + struct kobject kobj; + struct damon_sysfs_regions *regions; + int pid; +}; + +struct damon_sysfs_region; + +struct damon_sysfs_regions { + struct kobject kobj; + struct damon_sysfs_region **regions_arr; + int nr; +}; + +struct damon_sysfs_region { + struct kobject kobj; + struct damon_addr_range ar; +}; + +struct damon_sysfs_kdamonds; + +struct damon_sysfs_ui_dir { + struct kobject kobj; + struct damon_sysfs_kdamonds *kdamonds; +}; + +struct damon_sysfs_kdamonds { + struct kobject kobj; + struct damon_sysfs_kdamond **kdamonds_arr; + int nr; +}; + +enum { + BAD_STACK = -1, + NOT_STACK = 0, + GOOD_FRAME = 1, + GOOD_STACK = 2, +}; + +enum hmm_pfn_flags { + HMM_PFN_VALID = 9223372036854775808ULL, + HMM_PFN_WRITE = 4611686018427387904ULL, + HMM_PFN_ERROR = 2305843009213693952ULL, + HMM_PFN_ORDER_SHIFT = 56ULL, + HMM_PFN_REQ_FAULT = 9223372036854775808ULL, + HMM_PFN_REQ_WRITE = 4611686018427387904ULL, + HMM_PFN_FLAGS = 18374686479671623680ULL, +}; + +enum { + HMM_NEED_FAULT = 1, + HMM_NEED_WRITE_FAULT = 2, + HMM_NEED_ALL_BITS = 3, +}; + +struct hmm_range { + struct mmu_interval_notifier *notifier; + unsigned long notifier_seq; + unsigned long start; + unsigned long end; + unsigned long *hmm_pfns; + unsigned long default_flags; + unsigned long pfn_flags_mask; + void *dev_private_owner; +}; + +struct hmm_vma_walk { + struct hmm_range *range; + unsigned long last; +}; + +struct hugetlbfs_inode_info { + struct inode vfs_inode; + unsigned int seals; +}; + +struct page_reporting_dev_info { + int (*report)(struct page_reporting_dev_info *, struct scatterlist *, unsigned int); + struct delayed_work work; + atomic_t state; + unsigned int order; +}; + +enum { + PAGE_REPORTING_IDLE = 0, + PAGE_REPORTING_REQUESTED = 1, + PAGE_REPORTING_ACTIVE = 2, +}; + +typedef s32 compat_off_t; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +typedef __kernel_rwf_t rwf_t; + +typedef s64 compat_loff_t; + +struct files_stat_struct { + unsigned long nr_files; + unsigned long nr_free_files; + unsigned long max_files; +}; + +struct backing_file { + struct file file; + struct path user_path; +}; + +struct mount; + +struct mnt_namespace { + struct ns_common ns; + struct mount *root; + struct list_head list; + spinlock_t ns_lock; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + u64 event; + unsigned int mounts; + unsigned int pending_mounts; +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp __attribute__((btf_type_tag("percpu"))) * mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + struct list_head mnt_list; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + union { + struct hlist_node mnt_mp_list; + struct 
hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) * mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int mnt_count; + int mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry *m_dentry; + struct hlist_head m_list; + int m_count; +}; + +struct char_device_struct { + struct char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned int st_size; + unsigned int st_atime; + unsigned int st_mtime; + unsigned int st_ctime; +}; + +struct stat { + __kernel_ulong_t st_dev; + __kernel_ulong_t st_ino; + __kernel_ulong_t st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + __kernel_ulong_t st_rdev; + __kernel_long_t st_size; + __kernel_long_t st_blksize; + __kernel_long_t st_blocks; + __kernel_ulong_t st_atime; + __kernel_ulong_t st_atime_nsec; + __kernel_ulong_t st_mtime; + __kernel_ulong_t st_mtime_nsec; + __kernel_ulong_t st_ctime; + __kernel_ulong_t st_ctime_nsec; + __kernel_long_t __unused[3]; +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + __u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u32 stx_dio_mem_align; + __u32 stx_dio_offset_align; + __u64 __spare3[12]; +}; + +typedef u32 compat_ino_t; + +typedef u16 compat_mode_t; + +typedef u16 compat_nlink_t; + +typedef u16 __compat_uid_t; + +typedef u16 __compat_gid_t; + +struct compat_stat { + u32 st_dev; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + u32 st_rdev; + u32 st_size; + u32 st_blksize; + u32 st_blocks; + u32 st_atime; + u32 st_atime_nsec; + u32 st_mtime; + u32 st_mtime_nsec; + u32 st_ctime; + u32 st_ctime_nsec; + u32 __unused4; + u32 __unused5; +}; + +typedef unsigned short ushort; + +struct user_arg_ptr { + bool is_compat; + union { + const char __attribute__((btf_type_tag("user"))) *const + __attribute__((btf_type_tag("user"))) * + native; + const compat_uptr_t __attribute__((btf_type_tag("user"))) * compat; + } ptr; +}; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; + unsigned int flags; + unsigned int state; + unsigned int seq; + unsigned int next_seq; + unsigned 
int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + vfsuid_t dir_vfsuid; + umode_t dir_mode; +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +struct word_at_a_time { + const unsigned long one_bits; + const unsigned long high_bits; +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[32]; +}; + +struct renamedata { + struct mnt_idmap *old_mnt_idmap; + struct inode *old_dir; + struct dentry *old_dentry; + struct mnt_idmap *new_mnt_idmap; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +}; + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct flock { + short l_type; + short l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; +}; + +struct compat_flock64 { + short l_type; + short l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; +} __attribute__((packed)); + +struct compat_flock { + short l_type; + short l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; +}; + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent __attribute__((btf_type_tag("user"))) * fi_extents_start; +}; + +struct fiemap_extent { + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +struct space_resv_32 { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +} __attribute__((packed)); + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; +}; + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +struct old_linux_dirent { + unsigned long d_ino; + unsigned long d_offset; + unsigned short d_namlen; + char d_name[0]; +}; + +struct readdir_callback { + struct dir_context ctx; + struct old_linux_dirent __attribute__((btf_type_tag("user"))) * dirent; + int result; +}; + +struct linux_dirent { + unsigned long d_ino; + unsigned long d_off; + unsigned short d_reclen; + char d_name[0]; +}; + +struct getdents_callback { + struct dir_context ctx; + struct linux_dirent __attribute__((btf_type_tag("user"))) * current_dir; + int prev_reclen; + int count; + 
int error; +}; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + unsigned short d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 __attribute__((btf_type_tag("user"))) * current_dir; + int prev_reclen; + int count; + int error; +}; + +struct compat_old_linux_dirent { + compat_ulong_t d_ino; + compat_ulong_t d_offset; + unsigned short d_namlen; + char d_name[0]; +}; + +struct compat_readdir_callback { + struct dir_context ctx; + struct compat_old_linux_dirent __attribute__((btf_type_tag("user"))) * dirent; + int result; +}; + +struct compat_linux_dirent { + compat_ulong_t d_ino; + compat_ulong_t d_off; + unsigned short d_reclen; + char d_name[0]; +}; + +struct compat_getdents_callback { + struct dir_context ctx; + struct compat_linux_dirent __attribute__((btf_type_tag("user"))) * current_dir; + int prev_reclen; + int count; + int error; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[8]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +typedef struct { + unsigned long fds_bits[16]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct poll_list { + struct poll_list *next; + int len; + struct pollfd entries[0]; +}; + +struct compat_sel_arg_struct { + compat_ulong_t n; + compat_uptr_t inp; + compat_uptr_t outp; + compat_uptr_t exp; + compat_uptr_t tvp; +}; + +typedef struct { + unsigned long *in; + unsigned long *out; + unsigned long *ex; + unsigned long *res_in; + unsigned long *res_out; + unsigned long *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t __attribute__((btf_type_tag("user"))) * p; + size_t size; +}; + +struct compat_sigset_argpack { + compat_uptr_t p; + compat_size_t size; +}; + +struct dentry_stat_t { + long nr_dentry; + long nr_unused; + long age_limit; + long want_pages; + long nr_negative; + long dummy; +}; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +struct inodes_stat_t { + long nr_inodes; + long nr_unused; + long dummy[5]; +}; + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +enum mnt_tree_flags_t { + MNT_TREE_MOVE = 1, + MNT_TREE_BENEATH = 2, +}; + +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +struct mount_kattr { + unsigned int attr_set; + unsigned int attr_clr; + unsigned int propagation; + unsigned int lookup_flags; + bool recurse; + struct user_namespace 
*mnt_userns; + struct mnt_idmap *mnt_idmap; +}; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); + struct mount cursor; +}; + +struct mnt_idmap { + struct user_namespace *owner; + refcount_t count; +}; + +struct xattr_name { + char name[256]; +}; + +struct xattr_ctx { + union { + const void __attribute__((btf_type_tag("user"))) * cvalue; + void __attribute__((btf_type_tag("user"))) * value; + }; + void *kvalue; + size_t size; + struct xattr_name *kname; + unsigned int flags; +}; + +struct utf8data; + +struct utf8data_table; + +struct unicode_map { + unsigned int version; + const struct utf8data *ntab[2]; + const struct utf8data_table *tables; +}; + +struct utf8data { + unsigned int maxage; + unsigned int offset; +}; + +struct utf8data_table { + const unsigned int *utf8agetab; + int utf8agetab_size; + const struct utf8data *utf8nfdicfdata; + int utf8nfdicfdata_size; + const struct utf8data *utf8nfdidata; + int utf8nfdidata_size; + const unsigned char *utf8data; +}; + +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, + struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, + struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, + unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, + struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, + struct writeback_control *); + +struct wb_writeback_work; + +typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, + struct wb_writeback_work *); + +struct wb_writeback_work { + long nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages : 1; + unsigned int for_kupdate : 1; + unsigned int range_cyclic : 1; + unsigned int for_background : 1; + unsigned int for_sync : 1; + unsigned int auto_free : 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, + struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, + struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, + struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, + struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef 
void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, + struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, + struct wb_writeback_work *, unsigned long, int); + +typedef void (*btf_trace_global_dirty_state)(void *, unsigned long, unsigned long); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, + unsigned long, unsigned long); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + long, unsigned long); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, + struct writeback_control *, + unsigned long); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, + struct writeback_control *, unsigned long); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct trace_event_raw_writeback_folio_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + unsigned long index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + unsigned long state; + unsigned long flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long nr_to_write; + long pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; 
+ int range_cyclic; + long range_start; + long range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char name[32]; + unsigned long older; + long age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + unsigned long nr_dirty; + unsigned long nr_writeback; + unsigned long background_thresh; + unsigned long dirty_thresh; + unsigned long dirty_limit; + unsigned long nr_dirtied; + unsigned long nr_written; + char __data[0]; +}; + +struct trace_event_raw_bdi_dirty_ratelimit { + struct trace_entry ent; + char bdi[32]; + unsigned long write_bw; + unsigned long avg_write_bw; + unsigned long dirty_rate; + unsigned long dirty_ratelimit; + unsigned long task_ratelimit; + unsigned long balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + unsigned long limit; + unsigned long setpoint; + unsigned long dirty; + unsigned long bdi_setpoint; + unsigned long bdi_dirty; + unsigned long dirty_ratelimit; + unsigned long task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + unsigned long paused; + long pause; + unsigned long period; + long think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + unsigned long state; + unsigned long dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + unsigned long state; + unsigned long dirtied_when; + unsigned long writeback_index; + long nr_to_write; + unsigned long wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned long state; + __u16 mode; + unsigned long dirtied_when; + char __data[0]; +}; + +struct inode_switch_wbs_context { + struct rcu_work work; + struct bdi_writeback *new_wb; + struct inode *inodes[0]; +}; + +struct trace_event_data_offsets_writeback_folio_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + +struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct trace_event_data_offsets_writeback_inode_template {}; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void __attribute__((btf_type_tag("user"))) * userptr; + struct file *file; + void *data; + } u; + void 
(*splice_eof)(struct splice_desc *); + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; +}; + +typedef int +splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +struct utimbuf { + __kernel_old_time_t actime; + __kernel_old_time_t modtime; +}; + +struct old_utimbuf32 { + old_time32_t actime; + old_time32_t modtime; +}; + +struct prepend_buffer { + char *buf; + int len; +}; + +struct statfs { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __kernel_long_t f_blocks; + __kernel_long_t f_bfree; + __kernel_long_t f_bavail; + __kernel_long_t f_files; + __kernel_long_t f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct statfs64 { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +typedef int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + unsigned long f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +typedef __kernel_fsid_t compat_fsid_t; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; + int f_frsize; + int f_flags; + int f_spare[4]; +}; + +struct compat_statfs64 { + __u32 f_type; + __u32 f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; +} __attribute__((packed)); + +typedef s32 compat_daddr_t; + +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, + FSCONFIG_CMD_CREATE_EXCL = 8, +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, + struct iomap *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); +}; + +struct bh_lru { + struct buffer_head *bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +struct postprocess_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct dax_device; + +struct iomap_folio_ops; + +struct iomap { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_folio_ops *folio_ops; + u64 validity_cookie; +}; + +struct iomap_iter; + +struct iomap_folio_ops { + struct folio *(*get_folio)(struct iomap_iter *, loff_t, unsigned int); + void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio 
*); + bool (*iomap_valid)(struct inode *, const struct iomap *); +}; + +struct iomap_iter { + struct inode *inode; + loff_t pos; + u64 len; + s64 processed; + unsigned int flags; + struct iomap iomap; + struct iomap srcmap; + void *private; +}; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +struct folio_iter { + struct folio *folio; + size_t offset; + size_t length; + struct folio *_next; + size_t _seg_count; + int _i; +}; + +struct mpage_readpage_args { + struct bio *bio; + struct folio *folio; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + unsigned long first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, + struct task_struct *); + const char *lsm; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + struct ctl_table *sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + struct inode vfs_inode; +}; + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +struct dio { + int flags; + blk_opf_t opf; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + bool is_pinned; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + unsigned long refcount; + struct bio *bio_list; + struct task_struct *waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long : 64; +}; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + unsigned int cur_page_len; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; +}; + +typedef unsigned int iov_iter_extraction_t; + +enum fsnotify_iter_type { + FSNOTIFY_ITER_TYPE_INODE = 0, + FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, + FSNOTIFY_ITER_TYPE_SB = 2, + FSNOTIFY_ITER_TYPE_PARENT = 3, + FSNOTIFY_ITER_TYPE_INODE2 = 4, + FSNOTIFY_ITER_TYPE_COUNT = 5, +}; + +struct fs_error_report { + int error; + struct inode *inode; + struct super_block *sb; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +struct inotify_inode_mark { + struct fsnotify_mark fsn_mark; + int wd; +}; + +struct dnotify_struct; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char 
name[0]; +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +enum fanotify_event_type { + FANOTIFY_EVENT_TYPE_FID = 0, + FANOTIFY_EVENT_TYPE_FID_NAME = 1, + FANOTIFY_EVENT_TYPE_PATH = 2, + FANOTIFY_EVENT_TYPE_PATH_PERM = 3, + FANOTIFY_EVENT_TYPE_OVERFLOW = 4, + FANOTIFY_EVENT_TYPE_FS_ERROR = 5, + __FANOTIFY_EVENT_TYPE_NUM = 6, +}; + +enum { + FAN_EVENT_INIT = 0, + FAN_EVENT_REPORTED = 1, + FAN_EVENT_ANSWERED = 2, + FAN_EVENT_CANCELED = 3, +}; + +struct fanotify_fh { + u8 type; + u8 len; + u8 flags; + u8 pad; + unsigned char buf[0]; +}; + +struct fanotify_event { + struct fsnotify_event fse; + struct hlist_node merge_list; + u32 mask; + struct { + unsigned int type : 3; + unsigned int hash : 29; + }; + struct pid *pid; +}; + +struct fanotify_path_event { + struct fanotify_event fae; + struct path path; +}; + +struct fanotify_fid_event { + struct fanotify_event fae; + __kernel_fsid_t fsid; + struct { + struct fanotify_fh object_fh; + unsigned char _inline_fh_buf[12]; + }; +}; + +struct fanotify_info { + u8 dir_fh_totlen; + u8 dir2_fh_totlen; + u8 file_fh_totlen; + u8 name_len; + u8 name2_len; + u8 pad[3]; + unsigned char buf[0]; +}; + +struct fanotify_name_event { + struct fanotify_event fae; + __kernel_fsid_t fsid; + struct fanotify_info info; +}; + +struct fanotify_error_event { + struct fanotify_event fae; + s32 error; + u32 err_count; + __kernel_fsid_t fsid; + struct { + struct fanotify_fh object_fh; + unsigned char _inline_fh_buf[128]; + }; +}; + +struct fanotify_perm_event { + struct fanotify_event fae; + struct path path; + u32 response; + unsigned short state; + int fd; + union { + struct fanotify_response_info_header hdr; + struct fanotify_response_info_audit_rule audit_rule; + }; +}; + +struct fanotify_event_metadata { + __u32 event_len; + __u8 vers; + __u8 reserved; + __u16 metadata_len; + __u64 mask; + __s32 fd; + __s32 pid; +}; + +struct fanotify_event_info_header { + __u8 info_type; + __u8 pad; + __u16 len; +}; + +struct fanotify_event_info_pidfd { + struct fanotify_event_info_header hdr; + __s32 pidfd; +}; + +struct fanotify_event_info_error { + struct fanotify_event_info_header hdr; + __s32 error; + __u32 error_count; +}; + +struct fanotify_response { + __s32 fd; + __u32 response; +}; + +struct fanotify_event_info_fid { + struct fanotify_event_info_header hdr; + __kernel_fsid_t fsid; + unsigned char handle[0]; +}; + +struct epitem; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + struct hlist_head refs; + refcount_t refcount; + unsigned int napi_id; +}; + +struct epoll_filefd { + struct file *file; + int fd; +} __attribute__((packed)); + +struct epoll_event { + __poll_t events; + __u64 data; +} __attribute__((packed)); + +struct eppoll_entry; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + bool dying; + struct eppoll_entry *pwqlist; + struct eventpoll *ep; + struct hlist_node fllink; + struct wakeup_source __attribute__((btf_type_tag("rcu"))) * ws; + struct epoll_event event; +}; + +struct eppoll_entry { + struct eppoll_entry *next; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct epitems_head { + struct hlist_head epitems; + 
struct epitems_head *next; +}; + +struct ep_pqueue { + poll_table pt; + struct epitem *epi; +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + u64 ticks; + int clockid; + unsigned short expired; + unsigned short settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +struct eventfd_ctx { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +struct userfaultfd_ctx { + wait_queue_head_t fault_pending_wqh; + wait_queue_head_t fault_wqh; + wait_queue_head_t fd_wqh; + wait_queue_head_t event_wqh; + seqcount_spinlock_t refile_seq; + refcount_t refcount; + unsigned int flags; + unsigned int features; + bool released; + atomic_t mmap_changing; + struct mm_struct *mm; +}; + +struct userfaultfd_fork_ctx { + struct userfaultfd_ctx *orig; + struct userfaultfd_ctx *new; + struct list_head list; +}; + +struct userfaultfd_unmap_ctx { + struct userfaultfd_ctx *ctx; + unsigned long start; + unsigned long end; + struct list_head list; +}; + +struct uffd_msg { + __u8 event; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + union { + struct { + __u64 flags; + __u64 address; + union { + __u32 ptid; + } feat; + } pagefault; + struct { + __u32 ufd; + } fork; + struct { + __u64 from; + __u64 to; + __u64 len; + } remap; + struct { + __u64 start; + __u64 end; + } remove; + struct { + __u64 reserved1; + __u64 reserved2; + __u64 reserved3; + } reserved; + } arg; +}; + +struct userfaultfd_wait_queue { + struct uffd_msg msg; + wait_queue_entry_t wq; + struct userfaultfd_ctx *ctx; + bool waken; +}; + +struct uffdio_range { + __u64 start; + __u64 len; +}; + +struct uffdio_register { + struct uffdio_range range; + __u64 mode; + __u64 ioctls; +}; + +struct uffdio_copy { + __u64 dst; + __u64 src; + __u64 len; + __u64 mode; + __s64 copy; +}; + +struct uffdio_zeropage { + struct uffdio_range range; + __u64 mode; + __s64 zeropage; +}; + +struct uffdio_writeprotect { + struct uffdio_range range; + __u64 mode; +}; + +struct uffdio_continue { + struct uffdio_range range; + __u64 mode; + __s64 mapped; +}; + +struct uffdio_poison { + struct uffdio_range range; + __u64 mode; + __s64 updated; +}; + +struct uffdio_api { + __u64 api; + __u64 features; + __u64 ioctls; +}; + +struct userfaultfd_wake_range { + unsigned long start; + unsigned long len; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + unsigned long user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + unsigned long mmap_base; + unsigned long mmap_size; + struct page **ring_pages; + long nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long : 64; + long : 64; + long : 64; + struct { + atomic_t reqs_available; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; + 
struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + long : 64; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; + struct page *internal_pages[8]; + struct file *aio_ring_file; + unsigned int id; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool cancelled; + bool work_scheduled; + bool work_need_resched; + struct wait_queue_entry wait; + struct work_struct work; +}; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct io_event { + __u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +typedef __kernel_ulong_t aio_context_t; + +struct iocb { + __u64 aio_data; + __u32 aio_key; + __kernel_rwf_t aio_rw_flags; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + bool queued; + int error; +}; + +typedef u32 compat_aio_context_t; + +struct __aio_sigset { + const sigset_t __attribute__((btf_type_tag("user"))) * sigmask; + size_t sigsetsize; +}; + +struct __compat_aio_sigset { + compat_uptr_t sigmask; + compat_size_t sigsetsize; +}; + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, + struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lock *); + +typedef void 
(*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lock *, struct file_lock *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + unsigned long i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + unsigned long i_ino; + dev_t s_dev; + struct file_lock *fl_blocker; + fl_owner_t fl_owner; + unsigned int fl_pid; + unsigned int fl_flags; + unsigned char fl_type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lock *fl; + unsigned long i_ino; + dev_t s_dev; + struct file_lock *fl_blocker; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + unsigned long i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct flock64 { + short l_type; + short l_whence; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct trace_event_data_offsets_leases_conflict {}; + +struct locks_iterator { + int li_cpu; + loff_t li_pos; +}; + +enum { + Enabled = 0, + Magic = 1, +}; + +typedef struct { + struct list_head list; + unsigned long flags; + int offset; + int size; + char *magic; + char *mask; + const char *interpreter; + char *name; + struct dentry *dentry; + struct file *interp_file; + refcount_t users; +} Node; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct elf_thread_core_info; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +typedef unsigned int __kernel_uid_t; + +typedef unsigned int __kernel_gid_t; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + unsigned long pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct arch_elf_state {}; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Addr; + +typedef __u32 Elf32_Off; + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + 
Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +struct compat_elf_siginfo { + compat_int_t si_signo; + compat_int_t si_code; + compat_int_t si_errno; +}; + +struct compat_elf_prstatus_common { + struct compat_elf_siginfo pr_info; + short pr_cursig; + compat_ulong_t pr_sigpend; + compat_ulong_t pr_sighold; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + struct old_timeval32 pr_utime; + struct old_timeval32 pr_stime; + struct old_timeval32 pr_cutime; + struct old_timeval32 pr_cstime; +}; + +struct user_regs_struct32 { + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 esi; + __u32 edi; + __u32 ebp; + __u32 eax; + unsigned short ds; + unsigned short __ds; + unsigned short es; + unsigned short __es; + unsigned short fs; + unsigned short __fs; + unsigned short gs; + unsigned short __gs; + __u32 orig_eax; + __u32 eip; + unsigned short cs; + unsigned short __cs; + __u32 eflags; + __u32 esp; + unsigned short ss; + unsigned short __ss; +}; + +struct i386_elf_prstatus { + struct compat_elf_prstatus_common common; + struct user_regs_struct32 pr_reg; + compat_int_t pr_fpvalid; +}; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +struct elf_thread_core_info___2; + +struct elf_note_info___2 { + struct elf_thread_core_info___2 *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + compat_siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct user_regs_struct { + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long bp; + unsigned long bx; + unsigned long r11; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long orig_ax; + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; + unsigned long fs_base; + unsigned long gs_base; + unsigned long ds; + unsigned long es; + unsigned long fs; + unsigned long gs; +}; + +typedef struct user_regs_struct compat_elf_gregset_t; + +struct compat_elf_prstatus { + struct compat_elf_prstatus_common common; + compat_elf_gregset_t pr_reg; + compat_int_t pr_fpvalid; +}; + +struct elf_thread_core_info___2 { + struct elf_thread_core_info___2 *next; + struct task_struct *task; + struct compat_elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct compat_elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + compat_ulong_t pr_flag; + __compat_uid_t pr_uid; + __compat_gid_t pr_gid; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +}; + +typedef __u16 __le16; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct nfs4_ssc_client_ops; + +struct nfs_ssc_client_ops; + +struct nfs_ssc_client_ops_tbl { + 
const struct nfs4_ssc_client_ops *ssc_nfs4_ops; + const struct nfs_ssc_client_ops *ssc_nfs_ops; +}; + +struct nfs_fh; + +struct nfs4_stateid_struct; + +typedef struct nfs4_stateid_struct nfs4_stateid; + +struct nfs4_ssc_client_ops { + struct file *(*sco_open)(struct vfsmount *, struct nfs_fh *, nfs4_stateid *); + void (*sco_close)(struct file *); +}; + +struct rpc_timer { + struct list_head list; + unsigned long expires; + struct delayed_work dwork; +}; + +struct rpc_wait_queue { + spinlock_t lock; + struct list_head tasks[4]; + unsigned char maxpriority; + unsigned char priority; + unsigned char nr; + unsigned short qlen; + struct rpc_timer timer_list; + const char *name; +}; + +struct nfs_seqid_counter { + ktime_t create_time; + int owner_id; + int flags; + u32 counter; + spinlock_t lock; + struct list_head list; + struct rpc_wait_queue wait; +}; + +struct nfs4_stateid_struct { + union { + char data[16]; + struct { + __be32 seqid; + char other[12]; + }; + }; + enum { + NFS4_INVALID_STATEID_TYPE = 0, + NFS4_SPECIAL_STATEID_TYPE = 1, + NFS4_OPEN_STATEID_TYPE = 2, + NFS4_LOCK_STATEID_TYPE = 3, + NFS4_DELEGATION_STATEID_TYPE = 4, + NFS4_LAYOUT_STATEID_TYPE = 5, + NFS4_PNFS_DS_STATEID_TYPE = 6, + NFS4_REVOKED_STATEID_TYPE = 7, + } type; +}; + +struct nfs4_state; + +struct nfs4_lock_state { + struct list_head ls_locks; + struct nfs4_state *ls_state; + unsigned long ls_flags; + struct nfs_seqid_counter ls_seqid; + nfs4_stateid ls_stateid; + refcount_t ls_count; + fl_owner_t ls_owner; +}; + +struct nfs4_state_owner; + +struct nfs4_state { + struct list_head open_states; + struct list_head inode_states; + struct list_head lock_states; + struct nfs4_state_owner *owner; + struct inode *inode; + unsigned long flags; + spinlock_t state_lock; + seqlock_t seqlock; + nfs4_stateid stateid; + nfs4_stateid open_stateid; + unsigned int n_rdonly; + unsigned int n_wronly; + unsigned int n_rdwr; + fmode_t state; + refcount_t count; + wait_queue_head_t waitq; + struct callback_head callback_head; +}; + +struct nfs_server; + +struct nfs4_state_owner { + struct nfs_server *so_server; + struct list_head so_lru; + unsigned long so_expires; + struct rb_node so_server_node; + const struct cred *so_cred; + spinlock_t so_lock; + atomic_t so_count; + unsigned long so_flags; + struct list_head so_states; + struct nfs_seqid_counter so_seqid; + seqcount_spinlock_t so_reclaim_seqcount; + struct mutex so_delegreturn_mutex; +}; + +struct nlm_host; + +struct nfs_iostats; + +enum nfs4_change_attr_type { + NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, + NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, + NFS4_CHANGE_TYPE_IS_UNDEFINED = 4, +}; + +struct nfs_fsid { + uint64_t major; + uint64_t minor; +}; + +typedef u32 rpc_authflavor_t; + +struct nfs_auth_info { + unsigned int flavor_len; + rpc_authflavor_t flavors[12]; +}; + +struct fscache_volume; + +struct pnfs_layoutdriver_type; + +struct nfs_client; + +struct rpc_clnt; + +struct nfs_server { + struct nfs_client *nfs_client; + struct list_head client_link; + struct list_head master_link; + struct rpc_clnt *client; + struct rpc_clnt *client_acl; + struct nlm_host *nlm_host; + struct nfs_iostats __attribute__((btf_type_tag("percpu"))) * io_stats; + atomic_long_t writeback; + unsigned int write_congested; + unsigned int flags; + unsigned int fattr_valid; + unsigned int caps; + unsigned int rsize; + unsigned int rpages; + unsigned int wsize; + unsigned int wpages; + unsigned int wtmult; + unsigned int 
dtsize; + unsigned short port; + unsigned int bsize; + unsigned int gxasize; + unsigned int sxasize; + unsigned int lxasize; + unsigned int acregmin; + unsigned int acregmax; + unsigned int acdirmin; + unsigned int acdirmax; + unsigned int namelen; + unsigned int options; + unsigned int clone_blksize; + enum nfs4_change_attr_type change_attr_type; + struct nfs_fsid fsid; + int s_sysfs_id; + __u64 maxfilesize; + struct timespec64 time_delta; + unsigned long mount_time; + struct super_block *super; + dev_t s_dev; + struct nfs_auth_info auth_info; + struct fscache_volume *fscache; + char *fscache_uniq; + u32 pnfs_blksize; + u32 attr_bitmask[3]; + u32 attr_bitmask_nl[3]; + u32 exclcreat_bitmask[3]; + u32 cache_consistency_bitmask[3]; + u32 acl_bitmask; + u32 fh_expire_type; + struct pnfs_layoutdriver_type *pnfs_curr_ld; + struct rpc_wait_queue roc_rpcwaitq; + void *pnfs_ld_data; + struct rb_root state_owners; + struct ida openowner_id; + struct ida lockowner_id; + struct list_head state_owners_lru; + struct list_head layouts; + struct list_head delegations; + struct list_head ss_copies; + unsigned long delegation_gen; + unsigned long mig_gen; + unsigned long mig_status; + void (*destroy)(struct nfs_server *); + atomic_t active; + struct __kernel_sockaddr_storage mountd_address; + size_t mountd_addrlen; + u32 mountd_version; + unsigned short mountd_port; + unsigned short mountd_protocol; + struct rpc_wait_queue uoc_rpcwaitq; + unsigned int read_hdrsize; + const struct cred *cred; + bool has_sec_mnt_opts; + struct kobject kobj; +}; + +struct nfs_subversion; + +enum xprtsec_policies { + RPC_XPRTSEC_NONE = 0, + RPC_XPRTSEC_TLS_ANON = 1, + RPC_XPRTSEC_TLS_X509 = 2, +}; + +struct xprtsec_parms { + enum xprtsec_policies policy; + key_serial_t cert_serial; + key_serial_t privkey_serial; +}; + +typedef struct { + char data[8]; +} nfs4_verifier; + +struct idmap; + +struct nfs4_slot_table; + +struct nfs4_session; + +struct nfs_rpc_ops; + +struct nfs4_minor_version_ops; + +struct nfs41_server_owner; + +struct nfs41_server_scope; + +struct nfs41_impl_id; + +struct nfs_client { + refcount_t cl_count; + atomic_t cl_mds_count; + int cl_cons_state; + unsigned long cl_res_state; + unsigned long cl_flags; + struct __kernel_sockaddr_storage cl_addr; + size_t cl_addrlen; + char *cl_hostname; + char *cl_acceptor; + struct list_head cl_share_link; + struct list_head cl_superblocks; + struct rpc_clnt *cl_rpcclient; + const struct nfs_rpc_ops *rpc_ops; + int cl_proto; + struct nfs_subversion *cl_nfs_mod; + u32 cl_minorversion; + unsigned int cl_nconnect; + unsigned int cl_max_connect; + const char *cl_principal; + struct xprtsec_parms cl_xprtsec; + struct list_head cl_ds_clients; + u64 cl_clientid; + nfs4_verifier cl_confirm; + unsigned long cl_state; + spinlock_t cl_lock; + unsigned long cl_lease_time; + unsigned long cl_last_renewal; + struct delayed_work cl_renewd; + struct rpc_wait_queue cl_rpcwaitq; + struct idmap *cl_idmap; + const char *cl_owner_id; + u32 cl_cb_ident; + const struct nfs4_minor_version_ops *cl_mvops; + unsigned long cl_mig_gen; + struct nfs4_slot_table *cl_slot_tbl; + u32 cl_seqid; + u32 cl_exchange_flags; + struct nfs4_session *cl_session; + bool cl_preserve_clid; + struct nfs41_server_owner *cl_serverowner; + struct nfs41_server_scope *cl_serverscope; + struct nfs41_impl_id *cl_implid; + unsigned long cl_sp4_flags; + wait_queue_head_t cl_lock_waitq; + char cl_ipaddr[48]; + struct net *cl_net; + struct list_head pending_cb_stateids; +}; + +struct rpc_xprt_switch; + +struct rpc_xprt; + +struct 
rpc_xprt_iter_ops; + +struct rpc_xprt_iter { + struct rpc_xprt_switch __attribute__((btf_type_tag("rcu"))) * xpi_xpswitch; + struct rpc_xprt *xpi_cursor; + const struct rpc_xprt_iter_ops *xpi_ops; +}; + +struct rpc_iostats; + +struct rpc_pipe_dir_head { + struct list_head pdh_entries; + struct dentry *pdh_dentry; +}; + +struct rpc_rtt { + unsigned long timeo; + unsigned long srtt[5]; + unsigned long sdrtt[5]; + int ntimeouts[5]; +}; + +struct rpc_timeout { + unsigned long to_initval; + unsigned long to_maxval; + unsigned long to_increment; + unsigned int to_retries; + unsigned char to_exponential; +}; + +struct rpc_procinfo; + +struct rpc_auth; + +struct rpc_stat; + +struct rpc_program; + +struct rpc_sysfs_client; + +struct rpc_clnt { + refcount_t cl_count; + unsigned int cl_clid; + struct list_head cl_clients; + struct list_head cl_tasks; + atomic_t cl_pid; + spinlock_t cl_lock; + struct rpc_xprt __attribute__((btf_type_tag("rcu"))) * cl_xprt; + const struct rpc_procinfo *cl_procinfo; + u32 cl_prog; + u32 cl_vers; + u32 cl_maxproc; + struct rpc_auth *cl_auth; + struct rpc_stat *cl_stats; + struct rpc_iostats *cl_metrics; + unsigned int cl_softrtry : 1; + unsigned int cl_softerr : 1; + unsigned int cl_discrtry : 1; + unsigned int cl_noretranstimeo : 1; + unsigned int cl_autobind : 1; + unsigned int cl_chatty : 1; + unsigned int cl_shutdown : 1; + struct xprtsec_parms cl_xprtsec; + struct rpc_rtt *cl_rtt; + const struct rpc_timeout *cl_timeout; + atomic_t cl_swapper; + int cl_nodelen; + char cl_nodename[65]; + struct rpc_pipe_dir_head cl_pipedir_objects; + struct rpc_clnt *cl_parent; + struct rpc_rtt cl_rtt_default; + struct rpc_timeout cl_timeout_default; + const struct rpc_program *cl_program; + const char *cl_principal; + struct dentry *cl_debugfs; + struct rpc_sysfs_client *cl_sysfs; + union { + struct rpc_xprt_iter cl_xpi; + struct work_struct cl_work; + }; + const struct cred *cl_cred; + unsigned int cl_max_connect; + struct super_block *pipefs_sb; +}; + +struct svc_xprt; + +struct rpc_sysfs_xprt; + +struct rpc_xprt_ops; + +struct rpc_task; + +struct svc_serv; + +struct xprt_class; + +struct rpc_xprt { + struct kref kref; + const struct rpc_xprt_ops *ops; + unsigned int id; + const struct rpc_timeout *timeout; + struct __kernel_sockaddr_storage addr; + size_t addrlen; + int prot; + unsigned long cong; + unsigned long cwnd; + size_t max_payload; + struct rpc_wait_queue binding; + struct rpc_wait_queue sending; + struct rpc_wait_queue pending; + struct rpc_wait_queue backlog; + struct list_head free; + unsigned int max_reqs; + unsigned int min_reqs; + unsigned int num_reqs; + unsigned long state; + unsigned char resvport : 1; + unsigned char reuseport : 1; + atomic_t swapper; + unsigned int bind_index; + struct list_head xprt_switch; + unsigned long bind_timeout; + unsigned long reestablish_timeout; + struct xprtsec_parms xprtsec; + unsigned int connect_cookie; + struct work_struct task_cleanup; + struct timer_list timer; + unsigned long last_used; + unsigned long idle_timeout; + unsigned long connect_timeout; + unsigned long max_reconnect_timeout; + atomic_long_t queuelen; + spinlock_t transport_lock; + spinlock_t reserve_lock; + spinlock_t queue_lock; + u32 xid; + struct rpc_task *snd_task; + struct list_head xmit_queue; + atomic_long_t xmit_queuelen; + struct svc_xprt *bc_xprt; + struct svc_serv *bc_serv; + unsigned int bc_alloc_max; + unsigned int bc_alloc_count; + atomic_t bc_slot_count; + spinlock_t bc_pa_lock; + struct list_head bc_pa_list; + struct rb_root recv_queue; + struct { 
+ unsigned long bind_count; + unsigned long connect_count; + unsigned long connect_start; + unsigned long connect_time; + unsigned long sends; + unsigned long recvs; + unsigned long bad_xids; + unsigned long max_slots; + unsigned long long req_u; + unsigned long long bklog_u; + unsigned long long sending_u; + unsigned long long pending_u; + } stat; + struct net *xprt_net; + netns_tracker ns_tracker; + const char *servername; + const char *address_strings[6]; + struct dentry *debugfs; + struct callback_head rcu; + const struct xprt_class *xprt_class; + struct rpc_sysfs_xprt *xprt_sysfs; + bool main; +}; + +struct rpc_rqst; + +struct xdr_buf; + +struct rpc_xprt_ops { + void (*set_buffer_size)(struct rpc_xprt *, size_t, size_t); + int (*reserve_xprt)(struct rpc_xprt *, struct rpc_task *); + void (*release_xprt)(struct rpc_xprt *, struct rpc_task *); + void (*alloc_slot)(struct rpc_xprt *, struct rpc_task *); + void (*free_slot)(struct rpc_xprt *, struct rpc_rqst *); + void (*rpcbind)(struct rpc_task *); + void (*set_port)(struct rpc_xprt *, unsigned short); + void (*connect)(struct rpc_xprt *, struct rpc_task *); + int (*get_srcaddr)(struct rpc_xprt *, char *, size_t); + unsigned short (*get_srcport)(struct rpc_xprt *); + int (*buf_alloc)(struct rpc_task *); + void (*buf_free)(struct rpc_task *); + int (*prepare_request)(struct rpc_rqst *, struct xdr_buf *); + int (*send_request)(struct rpc_rqst *); + void (*wait_for_reply_request)(struct rpc_task *); + void (*timer)(struct rpc_xprt *, struct rpc_task *); + void (*release_request)(struct rpc_task *); + void (*close)(struct rpc_xprt *); + void (*destroy)(struct rpc_xprt *); + void (*set_connect_timeout)(struct rpc_xprt *, unsigned long, unsigned long); + void (*print_stats)(struct rpc_xprt *, struct seq_file *); + int (*enable_swap)(struct rpc_xprt *); + void (*disable_swap)(struct rpc_xprt *); + void (*inject_disconnect)(struct rpc_xprt *); + int (*bc_setup)(struct rpc_xprt *, unsigned int); + size_t (*bc_maxpayload)(struct rpc_xprt *); + unsigned int (*bc_num_slots)(struct rpc_xprt *); + void (*bc_free_rqst)(struct rpc_rqst *); + void (*bc_destroy)(struct rpc_xprt *, unsigned int); +}; + +struct rpc_wait { + struct list_head list; + struct list_head links; + struct list_head timer_list; +}; + +struct rpc_message { + const struct rpc_procinfo *rpc_proc; + void *rpc_argp; + void *rpc_resp; + const struct cred *rpc_cred; +}; + +struct rpc_call_ops; + +struct rpc_cred; + +struct rpc_task { + atomic_t tk_count; + int tk_status; + struct list_head tk_task; + void (*tk_callback)(struct rpc_task *); + void (*tk_action)(struct rpc_task *); + unsigned long tk_timeout; + unsigned long tk_runstate; + struct rpc_wait_queue *tk_waitqueue; + union { + struct work_struct tk_work; + struct rpc_wait tk_wait; + } u; + struct rpc_message tk_msg; + void *tk_calldata; + const struct rpc_call_ops *tk_ops; + struct rpc_clnt *tk_client; + struct rpc_xprt *tk_xprt; + struct rpc_cred *tk_op_cred; + struct rpc_rqst *tk_rqstp; + struct workqueue_struct *tk_workqueue; + ktime_t tk_start; + pid_t tk_owner; + int tk_rpc_status; + unsigned short tk_flags; + unsigned short tk_timeouts; + unsigned short tk_pid; + unsigned char tk_priority : 2; + unsigned char tk_garb_retry : 2; + unsigned char tk_cred_retry : 2; +}; + +struct xdr_stream; + +typedef void (*kxdreproc_t)(struct rpc_rqst *, struct xdr_stream *, const void *); + +typedef int (*kxdrdproc_t)(struct rpc_rqst *, struct xdr_stream *, void *); + +struct rpc_procinfo { + u32 p_proc; + kxdreproc_t p_encode; + kxdrdproc_t 
p_decode; + unsigned int p_arglen; + unsigned int p_replen; + unsigned int p_timer; + u32 p_statidx; + const char *p_name; +}; + +struct xdr_buf { + struct kvec head[1]; + struct kvec tail[1]; + struct bio_vec *bvec; + struct page **pages; + unsigned int page_base; + unsigned int page_len; + unsigned int flags; + unsigned int buflen; + unsigned int len; +}; + +struct lwq_node { + struct llist_node node; +}; + +struct rpc_rqst { + struct rpc_xprt *rq_xprt; + struct xdr_buf rq_snd_buf; + struct xdr_buf rq_rcv_buf; + struct rpc_task *rq_task; + struct rpc_cred *rq_cred; + __be32 rq_xid; + int rq_cong; + u32 rq_seqno; + int rq_enc_pages_num; + struct page **rq_enc_pages; + void (*rq_release_snd_buf)(struct rpc_rqst *); + union { + struct list_head rq_list; + struct rb_node rq_recv; + }; + struct list_head rq_xmit; + struct list_head rq_xmit2; + void *rq_buffer; + size_t rq_callsize; + void *rq_rbuffer; + size_t rq_rcvsize; + size_t rq_xmit_bytes_sent; + size_t rq_reply_bytes_recvd; + struct xdr_buf rq_private_buf; + unsigned long rq_majortimeo; + unsigned long rq_minortimeo; + unsigned long rq_timeout; + ktime_t rq_rtt; + unsigned int rq_retries; + unsigned int rq_connect_cookie; + atomic_t rq_pin; + u32 rq_bytes_sent; + ktime_t rq_xtime; + int rq_ntrans; + struct lwq_node rq_bc_list; + unsigned long rq_bc_pa_state; + struct list_head rq_bc_pa_list; +}; + +struct rpc_credops; + +struct rpc_cred { + struct hlist_node cr_hash; + struct list_head cr_lru; + struct callback_head cr_rcu; + struct rpc_auth *cr_auth; + const struct rpc_credops *cr_ops; + unsigned long cr_expire; + unsigned long cr_flags; + refcount_t cr_count; + const struct cred *cr_cred; +}; + +struct rpc_cred_cache; + +struct rpc_authops; + +struct rpc_auth { + unsigned int au_cslack; + unsigned int au_rslack; + unsigned int au_verfsize; + unsigned int au_ralign; + unsigned long au_flags; + const struct rpc_authops *au_ops; + rpc_authflavor_t au_flavor; + refcount_t au_count; + struct rpc_cred_cache *au_credcache; +}; + +struct rpc_auth_create_args; + +struct auth_cred; + +struct rpcsec_gss_info; + +struct rpc_authops { + struct module *owner; + rpc_authflavor_t au_flavor; + char *au_name; + struct rpc_auth *(*create)(const struct rpc_auth_create_args *, struct rpc_clnt *); + void (*destroy)(struct rpc_auth *); + int (*hash_cred)(struct auth_cred *, unsigned int); + struct rpc_cred *(*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); + struct rpc_cred *(*crcreate)(struct rpc_auth *, struct auth_cred *, int, gfp_t); + rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); + int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *); + int (*key_timeout)(struct rpc_auth *, struct rpc_cred *); + int (*ping)(struct rpc_clnt *); +}; + +struct rpc_auth_create_args { + rpc_authflavor_t pseudoflavor; + const char *target_name; +}; + +struct auth_cred { + const struct cred *cred; + const char *principal; +}; + +struct rpcsec_gss_oid { + unsigned int len; + u8 data[32]; +}; + +struct rpcsec_gss_info { + struct rpcsec_gss_oid oid; + u32 qop; + u32 service; +}; + +struct rpc_credops { + const char *cr_name; + int (*cr_init)(struct rpc_auth *, struct rpc_cred *); + void (*crdestroy)(struct rpc_cred *); + int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); + int (*crmarshal)(struct rpc_task *, struct xdr_stream *); + int (*crrefresh)(struct rpc_task *); + int (*crvalidate)(struct rpc_task *, struct xdr_stream *); + int (*crwrap_req)(struct rpc_task *, struct xdr_stream *); + int (*crunwrap_resp)(struct rpc_task *, struct 
xdr_stream *); + int (*crkey_timeout)(struct rpc_cred *); + char *(*crstringify_acceptor)(struct rpc_cred *); + bool (*crneed_reencode)(struct rpc_task *); +}; + +struct xdr_stream { + __be32 *p; + struct xdr_buf *buf; + __be32 *end; + struct kvec *iov; + struct kvec scratch; + struct page **page_ptr; + void *page_kaddr; + unsigned int nwords; + struct rpc_rqst *rqst; +}; + +struct rpc_call_ops { + void (*rpc_call_prepare)(struct rpc_task *, void *); + void (*rpc_call_done)(struct rpc_task *, void *); + void (*rpc_count_stats)(struct rpc_task *, void *); + void (*rpc_release)(void *); +}; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; + struct llist_head new; +}; + +struct svc_program; + +struct svc_stat; + +struct svc_pool; + +struct svc_serv { + struct svc_program *sv_program; + struct svc_stat *sv_stats; + spinlock_t sv_lock; + struct kref sv_refcnt; + unsigned int sv_nrthreads; + unsigned int sv_maxconn; + unsigned int sv_max_payload; + unsigned int sv_max_mesg; + unsigned int sv_xdrsize; + struct list_head sv_permsocks; + struct list_head sv_tempsocks; + int sv_tmpcnt; + struct timer_list sv_temptimer; + char *sv_name; + unsigned int sv_nrpools; + struct svc_pool *sv_pools; + int (*sv_threadfn)(void *); + struct lwq sv_cb_list; + bool sv_bc_enabled; +}; + +enum svc_auth_status { + SVC_GARBAGE = 1, + SVC_SYSERR = 2, + SVC_VALID = 3, + SVC_NEGATIVE = 4, + SVC_OK = 5, + SVC_DROP = 6, + SVC_CLOSE = 7, + SVC_DENIED = 8, + SVC_PENDING = 9, + SVC_COMPLETE = 10, +}; + +struct svc_version; + +struct svc_rqst; + +struct svc_process_info; + +struct svc_program { + struct svc_program *pg_next; + u32 pg_prog; + unsigned int pg_lovers; + unsigned int pg_hivers; + unsigned int pg_nvers; + const struct svc_version **pg_vers; + char *pg_name; + char *pg_class; + struct svc_stat *pg_stats; + enum svc_auth_status (*pg_authenticate)(struct svc_rqst *); + __be32 (*pg_init_request)(struct svc_rqst *, const struct svc_program *, + struct svc_process_info *); + int (*pg_rpcbind_set)(struct net *, const struct svc_program *, u32, int, + unsigned short, unsigned short); +}; + +struct svc_procedure; + +struct svc_version { + u32 vs_vers; + u32 vs_nproc; + const struct svc_procedure *vs_proc; + unsigned long __attribute__((btf_type_tag("percpu"))) * vs_count; + u32 vs_xdrsize; + bool vs_hidden; + bool vs_rpcb_optnl; + bool vs_need_cong_ctrl; + int (*vs_dispatch)(struct svc_rqst *); +}; + +struct svc_procedure { + __be32 (*pc_func)(struct svc_rqst *); + bool (*pc_decode)(struct svc_rqst *, struct xdr_stream *); + bool (*pc_encode)(struct svc_rqst *, struct xdr_stream *); + void (*pc_release)(struct svc_rqst *); + unsigned int pc_argsize; + unsigned int pc_argzero; + unsigned int pc_ressize; + unsigned int pc_cachetype; + unsigned int pc_xdrressize; + const char *pc_name; +}; + +struct gss_api_mech; + +struct svc_cred { + kuid_t cr_uid; + kgid_t cr_gid; + struct group_info *cr_group_info; + u32 cr_flavor; + char *cr_raw_principal; + char *cr_principal; + char *cr_targ_princ; + struct gss_api_mech *cr_gss_mech; +}; + +struct cache_deferred_req; + +struct cache_req { + struct cache_deferred_req *(*defer)(struct cache_req *); + unsigned long thread_wait; +}; + +struct auth_ops; + +struct svc_deferred_req; + +struct auth_domain; + +struct svc_rqst { + struct list_head rq_all; + struct llist_node rq_idle; + struct callback_head rq_rcu_head; + struct svc_xprt *rq_xprt; + struct __kernel_sockaddr_storage rq_addr; + size_t rq_addrlen; + struct __kernel_sockaddr_storage rq_daddr; + size_t rq_daddrlen; + 
struct svc_serv *rq_server; + struct svc_pool *rq_pool; + const struct svc_procedure *rq_procinfo; + struct auth_ops *rq_authop; + struct svc_cred rq_cred; + void *rq_xprt_ctxt; + struct svc_deferred_req *rq_deferred; + struct xdr_buf rq_arg; + struct xdr_stream rq_arg_stream; + struct xdr_stream rq_res_stream; + struct page *rq_scratch_page; + struct xdr_buf rq_res; + struct page *rq_pages[260]; + struct page **rq_respages; + struct page **rq_next_page; + struct page **rq_page_end; + struct folio_batch rq_fbatch; + struct kvec rq_vec[259]; + struct bio_vec rq_bvec[259]; + __be32 rq_xid; + u32 rq_prog; + u32 rq_vers; + u32 rq_proc; + u32 rq_prot; + int rq_cachetype; + unsigned long rq_flags; + ktime_t rq_qtime; + void *rq_argp; + void *rq_resp; + __be32 *rq_accept_statp; + void *rq_auth_data; + __be32 rq_auth_stat; + int rq_auth_slack; + int rq_reserved; + ktime_t rq_stime; + struct cache_req rq_chandle; + struct auth_domain *rq_client; + struct auth_domain *rq_gssclient; + struct task_struct *rq_task; + struct net *rq_bc_net; + void **rq_lease_breaker; + unsigned int rq_status_counter; +}; + +struct svc_pool { + unsigned int sp_id; + struct lwq sp_xprts; + atomic_t sp_nrthreads; + struct list_head sp_all_threads; + struct llist_head sp_idle_threads; + struct percpu_counter sp_messages_arrived; + struct percpu_counter sp_sockets_queued; + struct percpu_counter sp_threads_woken; + unsigned long sp_flags; +}; + +struct auth_ops { + char *name; + struct module *owner; + int flavour; + enum svc_auth_status (*accept)(struct svc_rqst *); + int (*release)(struct svc_rqst *); + void (*domain_release)(struct auth_domain *); + enum svc_auth_status (*set_client)(struct svc_rqst *); +}; + +struct auth_domain { + struct kref ref; + struct hlist_node hash; + char *name; + struct auth_ops *flavour; + struct callback_head callback_head; +}; + +struct gss_api_ops; + +struct pf_desc; + +struct gss_api_mech { + struct list_head gm_list; + struct module *gm_owner; + struct rpcsec_gss_oid gm_oid; + char *gm_name; + const struct gss_api_ops *gm_ops; + int gm_pf_num; + struct pf_desc *gm_pfs; + const char *gm_upcall_enctypes; +}; + +struct gss_ctx; + +struct xdr_netobj; + +struct gss_api_ops { + int (*gss_import_sec_context)(const void *, size_t, struct gss_ctx *, time64_t *, gfp_t); + u32 (*gss_get_mic)(struct gss_ctx *, struct xdr_buf *, struct xdr_netobj *); + u32 (*gss_verify_mic)(struct gss_ctx *, struct xdr_buf *, struct xdr_netobj *); + u32 (*gss_wrap)(struct gss_ctx *, int, struct xdr_buf *, struct page **); + u32 (*gss_unwrap)(struct gss_ctx *, int, int, struct xdr_buf *); + void (*gss_delete_sec_context)(void *); +}; + +struct gss_ctx { + struct gss_api_mech *mech_type; + void *internal_ctx_id; + unsigned int slack; + unsigned int align; +}; + +struct xdr_netobj { + unsigned int len; + u8 *data; +}; + +struct pf_desc { + u32 pseudoflavor; + u32 qop; + u32 service; + char *name; + char *auth_domain_name; + struct auth_domain *domain; + bool datatouch; +}; + +struct cache_head; + +struct cache_deferred_req { + struct hlist_node hash; + struct list_head recent; + struct cache_head *item; + void *owner; + void (*revisit)(struct cache_deferred_req *, int); +}; + +struct svc_deferred_req { + u32 prot; + struct svc_xprt *xprt; + struct __kernel_sockaddr_storage addr; + size_t addrlen; + struct __kernel_sockaddr_storage daddr; + size_t daddrlen; + void *xprt_ctxt; + struct cache_deferred_req handle; + int argslen; + __be32 args[0]; +}; + +struct cache_head { + struct hlist_node cache_list; + time64_t 
expiry_time; + time64_t last_refresh; + struct kref ref; + unsigned long flags; +}; + +struct svc_stat { + struct svc_program *program; + unsigned int netcnt; + unsigned int netudpcnt; + unsigned int nettcpcnt; + unsigned int nettcpconn; + unsigned int rpccnt; + unsigned int rpcbadfmt; + unsigned int rpcbadauth; + unsigned int rpcbadclnt; +}; + +struct svc_process_info { + union { + int (*dispatch)(struct svc_rqst *); + struct { + unsigned int lovers; + unsigned int hivers; + } mismatch; + }; +}; + +struct xprt_create; + +struct xprt_class { + struct list_head list; + int ident; + struct rpc_xprt *(*setup)(struct xprt_create *); + struct module *owner; + char name[32]; + const char *netid[0]; +}; + +struct xprt_create { + int ident; + struct net *net; + struct sockaddr *srcaddr; + struct sockaddr *dstaddr; + size_t addrlen; + const char *servername; + struct svc_xprt *bc_xprt; + struct rpc_xprt_switch *bc_xps; + unsigned int flags; + struct xprtsec_parms xprtsec; + unsigned long connect_timeout; + unsigned long reconnect_timeout; +}; + +struct rpc_sysfs_xprt_switch; + +struct rpc_xprt_switch { + spinlock_t xps_lock; + struct kref xps_kref; + unsigned int xps_id; + unsigned int xps_nxprts; + unsigned int xps_nactive; + unsigned int xps_nunique_destaddr_xprts; + atomic_long_t xps_queuelen; + struct list_head xps_xprt_list; + struct net *xps_net; + const struct rpc_xprt_iter_ops *xps_iter_ops; + struct rpc_sysfs_xprt_switch *xps_sysfs; + struct callback_head xps_rcu; +}; + +struct rpc_xprt_iter_ops { + void (*xpi_rewind)(struct rpc_xprt_iter *); + struct rpc_xprt *(*xpi_xprt)(struct rpc_xprt_iter *); + struct rpc_xprt *(*xpi_next)(struct rpc_xprt_iter *); +}; + +struct rpc_stat { + const struct rpc_program *program; + unsigned int netcnt; + unsigned int netudpcnt; + unsigned int nettcpcnt; + unsigned int nettcpconn; + unsigned int netreconn; + unsigned int rpccnt; + unsigned int rpcretrans; + unsigned int rpcauthrefresh; + unsigned int rpcgarbage; +}; + +struct rpc_version; + +struct rpc_program { + const char *name; + u32 number; + unsigned int nrvers; + const struct rpc_version **version; + struct rpc_stat *stats; + const char *pipe_dir_name; +}; + +struct rpc_version { + u32 number; + unsigned int nrprocs; + const struct rpc_procinfo *procs; + unsigned int *counts; +}; + +struct rpc_sysfs_client { + struct kobject kobject; + struct net *net; + struct rpc_clnt *clnt; + struct rpc_xprt_switch *xprt_switch; +}; + +struct nlmclnt_operations; + +struct nfs_client_initdata; + +struct nfs_fsinfo; + +struct nfs_fattr; + +struct nfs_access_entry; + +struct nfs_unlinkdata; + +struct nfs_renamedata; + +struct nfs_readdir_arg; + +struct nfs_readdir_res; + +struct nfs_fsstat; + +struct nfs_pathconf; + +struct nfs_entry; + +struct nfs_pgio_header; + +struct nfs_commit_data; + +struct nfs_open_context; + +struct nfs_rpc_ops { + u32 version; + const struct dentry_operations *dentry_ops; + const struct inode_operations *dir_inode_ops; + const struct inode_operations *file_inode_ops; + const struct file_operations *file_ops; + const struct nlmclnt_operations *nlmclnt_ops; + int (*getroot)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + int (*submount)(struct fs_context *, struct nfs_server *); + int (*try_get_tree)(struct fs_context *); + int (*getattr)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, + struct inode *); + int (*setattr)(struct dentry *, struct nfs_fattr *, struct iattr *); + int (*lookup)(struct inode *, struct dentry *, struct nfs_fh *, struct nfs_fattr *); + int 
(*lookupp)(struct inode *, struct nfs_fh *, struct nfs_fattr *); + int (*access)(struct inode *, struct nfs_access_entry *, const struct cred *); + int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); + int (*create)(struct inode *, struct dentry *, struct iattr *, int); + int (*remove)(struct inode *, struct dentry *); + void (*unlink_setup)(struct rpc_message *, struct dentry *, struct inode *); + void (*unlink_rpc_prepare)(struct rpc_task *, struct nfs_unlinkdata *); + int (*unlink_done)(struct rpc_task *, struct inode *); + void (*rename_setup)(struct rpc_message *, struct dentry *, struct dentry *); + void (*rename_rpc_prepare)(struct rpc_task *, struct nfs_renamedata *); + int (*rename_done)(struct rpc_task *, struct inode *, struct inode *); + int (*link)(struct inode *, struct inode *, const struct qstr *); + int (*symlink)(struct inode *, struct dentry *, struct folio *, unsigned int, + struct iattr *); + int (*mkdir)(struct inode *, struct dentry *, struct iattr *); + int (*rmdir)(struct inode *, const struct qstr *); + int (*readdir)(struct nfs_readdir_arg *, struct nfs_readdir_res *); + int (*mknod)(struct inode *, struct dentry *, struct iattr *, dev_t); + int (*statfs)(struct nfs_server *, struct nfs_fh *, struct nfs_fsstat *); + int (*fsinfo)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + int (*pathconf)(struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *); + int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); + int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool); + int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *); + void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *); + int (*read_done)(struct rpc_task *, struct nfs_pgio_header *); + void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, + struct rpc_clnt **); + int (*write_done)(struct rpc_task *, struct nfs_pgio_header *); + void (*commit_setup)(struct nfs_commit_data *, struct rpc_message *, + struct rpc_clnt **); + void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); + int (*commit_done)(struct rpc_task *, struct nfs_commit_data *); + int (*lock)(struct file *, int, struct file_lock *); + int (*lock_check_bounds)(const struct file_lock *); + void (*clear_acl_cache)(struct inode *); + void (*close_context)(struct nfs_open_context *, int); + struct inode *(*open_context)(struct inode *, struct nfs_open_context *, int, + struct iattr *, int *); + int (*have_delegation)(struct inode *, fmode_t); + struct nfs_client *(*alloc_client)(const struct nfs_client_initdata *); + struct nfs_client *(*init_client)(struct nfs_client *, + const struct nfs_client_initdata *); + void (*free_client)(struct nfs_client *); + struct nfs_server *(*create_server)(struct fs_context *); + struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, + struct nfs_fattr *, rpc_authflavor_t); + int (*discover_trunking)(struct nfs_server *, struct nfs_fh *); + void (*enable_swap)(struct inode *); + void (*disable_swap)(struct inode *); +}; + +struct nfs_fh { + unsigned short size; + unsigned char data[128]; +}; + +struct nfs_fsinfo { + struct nfs_fattr *fattr; + __u32 rtmax; + __u32 rtpref; + __u32 rtmult; + __u32 wtmax; + __u32 wtpref; + __u32 wtmult; + __u32 dtpref; + __u64 maxfilesize; + struct timespec64 time_delta; + __u32 lease_time; + __u32 nlayouttypes; + __u32 layouttype[8]; + __u32 blksize; + __u32 clone_blksize; + enum nfs4_change_attr_type change_attr_type; + __u32 xattr_support; +}; + 
+struct nfs4_string; + +struct nfs4_threshold; + +struct nfs4_label; + +struct nfs_fattr { + unsigned int valid; + umode_t mode; + __u32 nlink; + kuid_t uid; + kgid_t gid; + dev_t rdev; + __u64 size; + union { + struct { + __u32 blocksize; + __u32 blocks; + } nfs2; + struct { + __u64 used; + } nfs3; + } du; + struct nfs_fsid fsid; + __u64 fileid; + __u64 mounted_on_fileid; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + __u64 change_attr; + __u64 pre_change_attr; + __u64 pre_size; + struct timespec64 pre_mtime; + struct timespec64 pre_ctime; + unsigned long time_start; + unsigned long gencount; + struct nfs4_string *owner_name; + struct nfs4_string *group_name; + struct nfs4_threshold *mdsthreshold; + struct nfs4_label *label; +}; + +struct nfs4_string { + unsigned int len; + char *data; +}; + +struct nfs4_threshold { + __u32 bm; + __u32 l_type; + __u64 rd_sz; + __u64 wr_sz; + __u64 rd_io_sz; + __u64 wr_io_sz; +}; + +struct nfs4_label { + uint32_t lfs; + uint32_t pi; + u32 len; + char *label; +}; + +struct nfs_access_entry { + struct rb_node rb_node; + struct list_head lru; + kuid_t fsuid; + kgid_t fsgid; + struct group_info *group_info; + u64 timestamp; + __u32 mask; + struct callback_head callback_head; +}; + +struct nfs4_slot; + +struct nfs4_sequence_args { + struct nfs4_slot *sa_slot; + u8 sa_cache_this : 1; + u8 sa_privileged : 1; +}; + +struct nfs_removeargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *fh; + struct qstr name; +}; + +struct nfs4_sequence_res { + struct nfs4_slot *sr_slot; + unsigned long sr_timestamp; + int sr_status; + u32 sr_status_flags; + u32 sr_highest_slotid; + u32 sr_target_highest_slotid; +}; + +struct nfs4_change_info { + u32 atomic; + u64 before; + u64 after; +}; + +struct nfs_removeres { + struct nfs4_sequence_res seq_res; + struct nfs_server *server; + struct nfs_fattr *dir_attr; + struct nfs4_change_info cinfo; +}; + +struct nfs_unlinkdata { + struct nfs_removeargs args; + struct nfs_removeres res; + struct dentry *dentry; + wait_queue_head_t wq; + const struct cred *cred; + struct nfs_fattr dir_attr; + long timeout; +}; + +struct nfs_renameargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *old_dir; + const struct nfs_fh *new_dir; + const struct qstr *old_name; + const struct qstr *new_name; +}; + +struct nfs_renameres { + struct nfs4_sequence_res seq_res; + struct nfs_server *server; + struct nfs4_change_info old_cinfo; + struct nfs_fattr *old_fattr; + struct nfs4_change_info new_cinfo; + struct nfs_fattr *new_fattr; +}; + +struct nfs_renamedata { + struct nfs_renameargs args; + struct nfs_renameres res; + struct rpc_task task; + const struct cred *cred; + struct inode *old_dir; + struct dentry *old_dentry; + struct nfs_fattr old_fattr; + struct inode *new_dir; + struct dentry *new_dentry; + struct nfs_fattr new_fattr; + void (*complete)(struct rpc_task *, struct nfs_renamedata *); + long timeout; + bool cancelled; +}; + +struct nfs_readdir_arg { + struct dentry *dentry; + const struct cred *cred; + __be32 *verf; + u64 cookie; + struct page **pages; + unsigned int page_len; + bool plus; +}; + +struct nfs_readdir_res { + __be32 *verf; +}; + +struct nfs_fsstat { + struct nfs_fattr *fattr; + __u64 tbytes; + __u64 fbytes; + __u64 abytes; + __u64 tfiles; + __u64 ffiles; + __u64 afiles; +}; + +struct nfs_pathconf { + struct nfs_fattr *fattr; + __u32 max_link; + __u32 max_namelen; +}; + +struct nfs_entry { + __u64 ino; + __u64 cookie; + const char *name; + unsigned int len; + int eof; + 
struct nfs_fh *fh; + struct nfs_fattr *fattr; + unsigned char d_type; + struct nfs_server *server; +}; + +struct nfs_page; + +struct nfs_write_verifier { + char data[8]; +}; + +enum nfs3_stable_how { + NFS_UNSTABLE = 0, + NFS_DATA_SYNC = 1, + NFS_FILE_SYNC = 2, + NFS_INVALID_STABLE_HOW = -1, +}; + +struct nfs_writeverf { + struct nfs_write_verifier verifier; + enum nfs3_stable_how committed; +}; + +struct pnfs_layout_segment; + +struct nfs_rw_ops; + +struct nfs_io_completion; + +struct nfs_direct_req; + +struct nfs_lock_context; + +struct nfs_pgio_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + struct nfs_open_context *context; + struct nfs_lock_context *lock_context; + nfs4_stateid stateid; + __u64 offset; + __u32 count; + unsigned int pgbase; + struct page **pages; + union { + unsigned int replen; + struct { + const u32 *bitmask; + u32 bitmask_store[3]; + enum nfs3_stable_how stable; + }; + }; +}; + +struct nfs_pgio_res { + struct nfs4_sequence_res seq_res; + struct nfs_fattr *fattr; + __u64 count; + __u32 op_status; + union { + struct { + unsigned int replen; + int eof; + void *scratch; + }; + struct { + struct nfs_writeverf *verf; + const struct nfs_server *server; + }; + }; +}; + +struct nfs_page_array { + struct page **pagevec; + unsigned int npages; + struct page *page_array[8]; +}; + +struct nfs_pgio_completion_ops; + +struct nfs_pgio_header { + struct inode *inode; + const struct cred *cred; + struct list_head pages; + struct nfs_page *req; + struct nfs_writeverf verf; + fmode_t rw_mode; + struct pnfs_layout_segment *lseg; + loff_t io_start; + const struct rpc_call_ops *mds_ops; + void (*release)(struct nfs_pgio_header *); + const struct nfs_pgio_completion_ops *completion_ops; + const struct nfs_rw_ops *rw_ops; + struct nfs_io_completion *io_completion; + struct nfs_direct_req *dreq; + void *netfs; + int pnfs_error; + int error; + unsigned int good_bytes; + unsigned long flags; + struct rpc_task task; + struct nfs_fattr fattr; + struct nfs_pgio_args args; + struct nfs_pgio_res res; + unsigned long timestamp; + int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *); + __u64 mds_offset; + struct nfs_page_array page_array; + struct nfs_client *ds_clp; + u32 ds_commit_idx; + u32 pgio_mirror_idx; +}; + +struct nfs_pgio_completion_ops { + void (*error_cleanup)(struct list_head *, int); + void (*init_hdr)(struct nfs_pgio_header *); + void (*completion)(struct nfs_pgio_header *); + void (*reschedule_io)(struct nfs_pgio_header *); +}; + +struct nfs_lock_context { + refcount_t count; + struct list_head list; + struct nfs_open_context *open_context; + fl_owner_t lockowner; + atomic_t io_count; + struct callback_head callback_head; +}; + +struct nfs_open_context { + struct nfs_lock_context lock_context; + fl_owner_t flock_owner; + struct dentry *dentry; + const struct cred *cred; + struct rpc_cred __attribute__((btf_type_tag("rcu"))) * ll_cred; + struct nfs4_state *state; + fmode_t mode; + unsigned long flags; + int error; + struct list_head list; + struct nfs4_threshold *mdsthreshold; + struct callback_head callback_head; +}; + +struct nfs_commitargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + __u64 offset; + __u32 count; + const u32 *bitmask; +}; + +struct nfs_commitres { + struct nfs4_sequence_res seq_res; + __u32 op_status; + struct nfs_fattr *fattr; + struct nfs_writeverf *verf; + const struct nfs_server *server; +}; + +struct nfs_commit_completion_ops; + +struct nfs_commit_data { + struct rpc_task task; + struct inode *inode; + const 
struct cred *cred; + struct nfs_fattr fattr; + struct nfs_writeverf verf; + struct list_head pages; + struct list_head list; + struct nfs_direct_req *dreq; + struct nfs_commitargs args; + struct nfs_commitres res; + struct nfs_open_context *context; + struct pnfs_layout_segment *lseg; + struct nfs_client *ds_clp; + int ds_commit_index; + loff_t lwb; + const struct rpc_call_ops *mds_ops; + const struct nfs_commit_completion_ops *completion_ops; + int (*commit_done_cb)(struct rpc_task *, struct nfs_commit_data *); + unsigned long flags; +}; + +struct nfs_commit_info; + +struct nfs_commit_completion_ops { + void (*completion)(struct nfs_commit_data *); + void (*resched_write)(struct nfs_commit_info *, struct nfs_page *); +}; + +struct nfs_mds_commit_info; + +struct pnfs_ds_commit_info; + +struct nfs_commit_info { + struct inode *inode; + struct nfs_mds_commit_info *mds; + struct pnfs_ds_commit_info *ds; + struct nfs_direct_req *dreq; + const struct nfs_commit_completion_ops *completion_ops; +}; + +struct nfs_mds_commit_info { + atomic_t rpcs_out; + atomic_long_t ncommit; + struct list_head list; +}; + +struct pnfs_commit_ops; + +struct pnfs_ds_commit_info { + struct list_head commits; + unsigned int nwritten; + unsigned int ncommitting; + const struct pnfs_commit_ops *ops; +}; + +struct nfs_seqid; + +struct nfs4_state_recovery_ops; + +struct nfs4_state_maintenance_ops; + +struct nfs4_mig_recovery_ops; + +struct nfs4_minor_version_ops { + u32 minor_version; + unsigned int init_caps; + int (*init_client)(struct nfs_client *); + void (*shutdown_client)(struct nfs_client *); + bool (*match_stateid)(const nfs4_stateid *, const nfs4_stateid *); + int (*find_root_sec)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + void (*free_lock_state)(struct nfs_server *, struct nfs4_lock_state *); + int (*test_and_free_expired)(struct nfs_server *, nfs4_stateid *, const struct cred *); + struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); + void (*session_trunk)(struct rpc_clnt *, struct rpc_xprt *, void *); + const struct rpc_call_ops *call_sync_ops; + const struct nfs4_state_recovery_ops *reboot_recovery_ops; + const struct nfs4_state_recovery_ops *nograce_recovery_ops; + const struct nfs4_state_maintenance_ops *state_renewal_ops; + const struct nfs4_mig_recovery_ops *mig_recovery_ops; +}; + +struct nfs_seqid { + struct nfs_seqid_counter *sequence; + struct list_head list; + struct rpc_task *task; +}; + +struct nfs4_state_recovery_ops { + int owner_flag_bit; + int state_flag_bit; + int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); + int (*recover_lock)(struct nfs4_state *, struct file_lock *); + int (*establish_clid)(struct nfs_client *, const struct cred *); + int (*reclaim_complete)(struct nfs_client *, const struct cred *); + int (*detect_trunking)(struct nfs_client *, struct nfs_client **, const struct cred *); +}; + +struct nfs4_state_maintenance_ops { + int (*sched_state_renewal)(struct nfs_client *, const struct cred *, unsigned int); + const struct cred *(*get_state_renewal_cred)(struct nfs_client *); + int (*renew_lease)(struct nfs_client *, const struct cred *); +}; + +struct nfs4_fs_locations; + +struct nfs4_mig_recovery_ops { + int (*get_locations)(struct nfs_server *, struct nfs_fh *, + struct nfs4_fs_locations *, struct page *, const struct cred *); + int (*fsid_present)(struct inode *, const struct cred *); +}; + +struct nfs4_pathname { + unsigned int ncomponents; + struct nfs4_string components[512]; +}; + +struct nfs4_fs_location { + unsigned 
int nservers; + struct nfs4_string servers[10]; + struct nfs4_pathname rootpath; +}; + +struct nfs4_fs_locations { + struct nfs_fattr *fattr; + const struct nfs_server *server; + struct nfs4_pathname fs_path; + int nlocations; + struct nfs4_fs_location locations[10]; +}; + +struct nfs41_server_owner { + uint64_t minor_id; + uint32_t major_id_sz; + char major_id[1024]; +}; + +struct nfs41_server_scope { + uint32_t server_scope_sz; + char server_scope[1024]; +}; + +struct nfstime4 { + u64 seconds; + u32 nseconds; +}; + +struct nfs41_impl_id { + char domain[1025]; + char name[1025]; + struct nfstime4 date; +}; + +struct nfs_ssc_client_ops { + void (*sco_sb_deactive)(struct super_block *); +}; + +struct core_name { + char *corename; + int used; + int size; +}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_invalidate_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, unsigned long); + +typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, struct iov_iter *, + unsigned int, size_t); + +typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + int nr_pages; + char __data[0]; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t size; + loff_t offset; + u64 length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_iter { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t pos; + u64 length; + unsigned int flags; + const void *ops; + unsigned long caller; + char __data[0]; +}; + +struct trace_event_raw_iomap_dio_rw_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + size_t count; + size_t done_before; + int ki_flags; + unsigned int dio_flags; + bool aio; + char __data[0]; +}; + +struct trace_event_raw_iomap_dio_complete { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + int ki_flags; + bool aio; + int error; + ssize_t ret; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class {}; + +struct trace_event_data_offsets_iomap_iter {}; + +struct trace_event_data_offsets_iomap_dio_rw_begin {}; + +struct trace_event_data_offsets_iomap_dio_complete {}; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, + BIOSET_PERCPU_CACHE = 4, +}; + +struct iomap_ioend { + struct list_head io_list; + 
u16 io_type; + u16 io_flags; + u32 io_folios; + struct inode *io_inode; + size_t io_size; + loff_t io_offset; + sector_t io_sector; + struct bio *io_bio; + struct bio io_inline_bio; +}; + +struct iomap_readpage_ctx { + struct folio *cur_folio; + bool cur_folio_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +struct iomap_folio_state { + spinlock_t state_lock; + unsigned int read_bytes_pending; + atomic_t write_bytes_pending; + unsigned long state[0]; +}; + +typedef int (*iomap_punch_t)(struct inode *, loff_t, loff_t); + +typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); + +struct iomap_writeback_ops; + +struct iomap_writepage_ctx { + struct iomap iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; +}; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_folio)(struct folio *, loff_t); +}; + +struct iomap_dio_ops; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + size_t done_before; + bool wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + } submit; + struct { + struct work_struct work; + } aio; + }; +}; + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); + void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); + struct bio_set *bio_set; +}; + +struct iomap_swapfile_info { + struct iomap iomap; + struct swap_info_struct *sis; + uint64_t lowest_ppage; + uint64_t highest_ppage; + unsigned long nr_pages; + int nr_extents; + struct file *file; +}; + +struct dqstats { + unsigned long stat[8]; + struct percpu_counter counter[8]; +}; + +struct quota_module_name { + int qm_fmt_id; + char *qm_mod_name; +}; + +enum { + DQF_INFO_DIRTY_B = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B = 1, + QIF_ILIMITS_B = 2, + QIF_INODES_B = 3, + QIF_BTIME_B = 4, + QIF_ITIME_B = 5, +}; + +typedef __kernel_uid32_t qid_t; + +struct dquot_warn { + struct super_block *w_sb; + struct kqid w_dq_id; + short w_type; +}; + +struct qtree_fmt_operations { + void (*mem2disk_dqblk)(void *, struct dquot *); + void (*disk2mem_dqblk)(struct dquot *, void *); + int (*is_id)(void *, struct dquot *); +}; + +struct v2_disk_dqheader { + __le32 dqh_magic; + __le32 dqh_version; +}; + +struct qtree_mem_dqinfo { + struct super_block *dqi_sb; + int dqi_type; + unsigned int dqi_blocks; + unsigned int dqi_free_blk; + unsigned int dqi_free_entry; + unsigned int dqi_blocksize_bits; + unsigned int dqi_entry_size; + unsigned int dqi_usable_bs; + unsigned int dqi_qtree_depth; + const struct qtree_fmt_operations *dqi_ops; +}; + +struct v2_disk_dqinfo { + __le32 dqi_bgrace; + __le32 dqi_igrace; + __le32 dqi_flags; + __le32 dqi_blocks; + __le32 dqi_free_blk; + __le32 dqi_free_entry; +}; + +struct v2r0_disk_dqblk { + __le32 dqb_id; + __le32 dqb_ihardlimit; + __le32 dqb_isoftlimit; + __le32 dqb_curinodes; + __le32 dqb_bhardlimit; + __le32 dqb_bsoftlimit; + __le64 dqb_curspace; + __le64 dqb_btime; + __le64 dqb_itime; +}; + +struct v2r1_disk_dqblk { + __le32 
dqb_id; + __le32 dqb_pad; + __le64 dqb_ihardlimit; + __le64 dqb_isoftlimit; + __le64 dqb_curinodes; + __le64 dqb_bhardlimit; + __le64 dqb_bsoftlimit; + __le64 dqb_curspace; + __le64 dqb_btime; + __le64 dqb_itime; +}; + +struct qt_disk_dqdbheader { + __le32 dqdh_next_free; + __le32 dqdh_prev_free; + __le16 dqdh_entries; + __le16 dqdh_pad1; + __le32 dqdh_pad2; +}; + +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +struct fs_qfilestat { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; +}; + +typedef struct fs_qfilestat fs_qfilestat_t; + +struct fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + fs_qfilestat_t qs_uquota; + fs_qfilestat_t qs_gquota; + __u32 qs_incoredqs; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +}; + +struct compat_fs_qfilestat { + compat_u64 dqb_bhardlimit; + compat_u64 qfs_nblks; + compat_uint_t qfs_nextents; +} __attribute__((packed)); + +struct compat_fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + long : 0; + struct compat_fs_qfilestat qs_uquota; + struct compat_fs_qfilestat qs_gquota; + compat_uint_t qs_incoredqs; + compat_int_t qs_btimelimit; + compat_int_t qs_itimelimit; + compat_int_t qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +}; + +struct fs_qfilestatv { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; + __u32 qfs_pad; +}; + +struct fs_quota_statv { + __s8 qs_version; + __u8 qs_pad1; + __u16 qs_flags; + __u32 qs_incoredqs; + struct fs_qfilestatv qs_uquota; + struct fs_qfilestatv qs_gquota; + struct fs_qfilestatv qs_pquota; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; + __u16 qs_rtbwarnlimit; + __u16 qs_pad3; + __u32 qs_pad4; + __u64 qs_pad2[7]; +}; + +struct fs_disk_quota { + __s8 d_version; + __s8 d_flags; + __u16 d_fieldmask; + __u32 d_id; + __u64 d_blk_hardlimit; + __u64 d_blk_softlimit; + __u64 d_ino_hardlimit; + __u64 d_ino_softlimit; + __u64 d_bcount; + __u64 d_icount; + __s32 d_itimer; + __s32 d_btimer; + __u16 d_iwarns; + __u16 d_bwarns; + __s8 d_itimer_hi; + __s8 d_btimer_hi; + __s8 d_rtbtimer_hi; + __s8 d_padding2; + __u64 d_rtb_hardlimit; + __u64 d_rtb_softlimit; + __u64 d_rtbcount; + __s32 d_rtbtimer; + __u16 d_rtbwarns; + __s16 d_padding3; + char d_padding4[8]; +}; + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 dqi_valid; +}; + +struct compat_if_dqblk { + compat_u64 dqb_bhardlimit; + compat_u64 dqb_bsoftlimit; + compat_u64 dqb_curspace; + compat_u64 dqb_ihardlimit; + compat_u64 dqb_isoftlimit; + compat_u64 dqb_curinodes; + compat_u64 dqb_btime; + compat_u64 dqb_itime; + compat_uint_t dqb_valid; +} __attribute__((packed)); + +struct if_nextdqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; + __u32 dqb_id; +}; + +enum { + QUOTA_NL_C_UNSPEC = 0, + QUOTA_NL_C_WARNING = 1, + __QUOTA_NL_C_MAX = 2, +}; + +enum { + QUOTA_NL_A_UNSPEC = 0, + QUOTA_NL_A_QTYPE = 1, + QUOTA_NL_A_EXCESS_ID = 2, + QUOTA_NL_A_WARNING = 3, + QUOTA_NL_A_DEV_MAJOR = 4, + QUOTA_NL_A_DEV_MINOR = 5, + QUOTA_NL_A_CAUSED_ID = 6, + QUOTA_NL_A_PAD = 7, + __QUOTA_NL_A_MAX = 8, +}; + +enum clear_refs_types { + 
CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +struct proc_maps_private { + struct inode *inode; + struct task_struct *task; + struct mm_struct *mm; + struct vma_iterator iter; + struct mempolicy *task_mempolicy; +}; + +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + +struct pagemap_scan_private { + struct pm_scan_arg arg; + unsigned long masks_of_interest; + unsigned long cur_vma_category; + struct page_region *vec_buf; + unsigned long vec_buf_len; + unsigned long vec_buf_index; + unsigned long found_pages; + struct page_region __attribute__((btf_type_tag("user"))) * vec_out; +}; + +struct mem_size_stats { + unsigned long resident; + unsigned long shared_clean; + unsigned long shared_dirty; + unsigned long private_clean; + unsigned long private_dirty; + unsigned long referenced; + unsigned long anonymous; + unsigned long lazyfree; + unsigned long anonymous_thp; + unsigned long shmem_thp; + unsigned long file_thp; + unsigned long swap; + unsigned long shared_hugetlb; + unsigned long private_hugetlb; + unsigned long ksm; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_dirty; + u64 pss_locked; + u64 swap_pss; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +struct numa_maps { + unsigned long pages; + unsigned long anon; + unsigned long active; + unsigned long writeback; + unsigned long mapcount_max; + unsigned long dirty; + unsigned long swapcache; + unsigned long node[64]; +}; + +struct numa_maps_private { + struct proc_maps_private proc_maps; + struct numa_maps md; +}; + +enum { + BIAS = 2147483648, +}; + +enum { + PROC_ENTRY_PERMANENT = 1, +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum proc_param { + Opt_gid___2 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + unsigned long start; + unsigned long end; + fmode_t mode; +}; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +struct timers_private { + struct pid *pid; + struct task_struct *task; + struct sighand_struct *sighand; + struct pid_namespace *ns; + unsigned long flags; +}; + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct seq_net_private { + struct net *net; + netns_tracker ns_tracker; +}; + +struct 
kernfs_global_locks { + struct mutex open_file_mutex[1024]; +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root *root; + const void *ns; + struct list_head node; +}; + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_HIDDEN = 512, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, + KERNFS_REMOVING = 16384, +}; + +struct configfs_fragment; + +struct configfs_dirent { + atomic_t s_count; + int s_dependent_count; + struct list_head s_sibling; + struct list_head s_children; + int s_links; + void *s_element; + int s_type; + umode_t s_mode; + struct dentry *s_dentry; + struct iattr *s_iattr; + struct configfs_fragment *s_frag; +}; + +struct configfs_fragment { + atomic_t frag_count; + struct rw_semaphore frag_sem; + bool frag_dead; +}; + +struct config_item; + +struct configfs_attribute { + const char *ca_name; + struct module *ca_owner; + umode_t ca_mode; + ssize_t (*show)(struct config_item *, char *); + ssize_t (*store)(struct config_item *, const char *, size_t); +}; + +struct config_group; + +struct config_item_type; + +struct config_item { + char *ci_name; + char ci_namebuf[20]; + struct kref ci_kref; + struct list_head ci_entry; + struct config_item *ci_parent; + struct config_group *ci_group; + const struct config_item_type *ci_type; + struct dentry *ci_dentry; +}; + +struct configfs_subsystem; + +struct config_group { + struct config_item cg_item; + struct list_head cg_children; + struct configfs_subsystem *cg_subsys; + struct list_head default_groups; + struct list_head group_entry; +}; + +struct configfs_subsystem { + struct config_group su_group; + struct mutex su_mutex; +}; + +struct configfs_item_operations; + +struct configfs_group_operations; + +struct configfs_bin_attribute; + +struct config_item_type { + struct module *ct_owner; + struct configfs_item_operations *ct_item_ops; + struct configfs_group_operations *ct_group_ops; + struct configfs_attribute **ct_attrs; + struct configfs_bin_attribute **ct_bin_attrs; +}; + +struct configfs_item_operations { + void (*release)(struct config_item *); + int (*allow_link)(struct config_item *, struct config_item *); + void (*drop_link)(struct config_item *, struct config_item *); +}; + +struct configfs_group_operations { + struct config_item *(*make_item)(struct config_group *, const char *); + struct config_group *(*make_group)(struct config_group *, const char *); + void (*disconnect_notify)(struct config_group *, struct config_item *); + void (*drop_item)(struct config_group *, struct config_item *); +}; + +struct configfs_bin_attribute { + struct configfs_attribute cb_attr; + void *cb_private; + size_t cb_max_size; + ssize_t (*read)(struct config_item *, void *, size_t); + ssize_t (*write)(struct config_item *, const void *, size_t); +}; + +struct configfs_buffer { + size_t count; + loff_t pos; + char *page; + struct configfs_item_operations *ops; + struct mutex mutex; + int needs_read_fill; + bool read_in_progress; + bool write_in_progress; + char *bin_buffer; + int bin_buffer_size; + int cb_max_size; + struct config_item *item; + struct module *owner; + union { + struct configfs_attribute *attr; + struct configfs_bin_attribute *bin_attr; + }; +}; + +enum { + Opt_uid___2 = 0, + Opt_gid___3 = 1, + Opt_mode___2 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err = 6, +}; + +struct pts_mount_opts { + int setuid; + int setgid; + 
kuid_t uid; + kgid_t gid; + umode_t mode; + umode_t ptmxmode; + int reserve; + int max; +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +enum ramfs_param { + Opt_mode___3 = 0, +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum hugetlbfs_size_type { + NO_SIZE = 0, + SIZE_STD = 1, + SIZE_PERCENT = 2, +}; + +enum hugetlb_param { + Opt_gid___4 = 0, + Opt_min_size = 1, + Opt_mode___4 = 2, + Opt_nr_inodes___2 = 3, + Opt_pagesize = 4, + Opt_size___2 = 5, + Opt_uid___3 = 6, +}; + +struct hugetlbfs_fs_context { + struct hstate *hstate; + unsigned long long max_size_opt; + unsigned long long min_size_opt; + long max_hpages; + long nr_inodes; + long min_hpages; + enum hugetlbfs_size_type max_val_type; + enum hugetlbfs_size_type min_val_type; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +struct getdents_callback___2 { + struct dir_context ctx; + char *name; + u64 ino; + int found; + int sequence; +}; + +struct utf8_table { + int cmask; + int cval; + int shift; + long lmask; + long lval; +}; + +typedef u16 wchar_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +typedef u32 unicode_t; + +enum utf8_normalization { + UTF8_NFDI = 0, + UTF8_NFDICF = 1, + UTF8_NMAX = 2, +}; + +typedef const unsigned char utf8leaf_t; + +typedef const unsigned char utf8trie_t; + +struct utf8cursor { + const struct unicode_map *um; + enum utf8_normalization n; + const char *s; + const char *p; + const char *ss; + const char *sp; + unsigned int len; + unsigned int slen; + short ccc; + short nccc; + unsigned char hangul[12]; +}; + +enum { + Opt_uid___4 = 0, + Opt_gid___5 = 1, + Opt_mode___5 = 2, + Opt_err___2 = 3, +}; + +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +struct debugfs_fsdata { + const struct file_operations *real_fops; + union { + debugfs_automount_t automount; + struct { + refcount_t active_users; + struct completion active_users_drained; + struct mutex cancellations_mtx; + struct list_head cancellations; + }; + }; +}; + +struct debugfs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +struct debugfs_fs_info { + struct debugfs_mount_opts mount_opts; +}; + +struct debugfs_reg32 { + char *name; + unsigned long offset; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +enum { + TRACEFS_EVENT_INODE = 2, + TRACEFS_EVENT_TOP_INODE = 4, + TRACEFS_GID_PERM_SET = 8, + TRACEFS_UID_PERM_SET = 16, + TRACEFS_INSTANCE_INODE = 32, +}; + +struct tracefs_inode { + unsigned long flags; + void *private; + struct inode vfs_inode; +}; + +struct tracefs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +struct 
tracefs_fs_info { + struct tracefs_mount_opts mount_opts; +}; + +enum { + EVENTFS_SAVE_MODE = 65536, + EVENTFS_SAVE_UID = 131072, + EVENTFS_SAVE_GID = 262144, + EVENTFS_TOPLEVEL = 524288, +}; + +struct eventfs_attr { + int mode; + kuid_t uid; + kgid_t gid; +}; + +struct eventfs_inode { + struct list_head list; + const struct eventfs_entry *entries; + const char *name; + struct list_head children; + struct dentry *dentry; + struct dentry *d_parent; + struct dentry **d_children; + struct eventfs_attr *entry_attrs; + struct eventfs_attr attr; + void *data; + union { + struct llist_node llist; + struct callback_head rcu; + }; + unsigned int is_freed : 1; + unsigned int is_events : 1; + unsigned int nr_entries : 30; +}; + +struct dentry_list { + void *cursor; + struct dentry **dentries; +}; + +enum pstore_type_id { + PSTORE_TYPE_DMESG = 0, + PSTORE_TYPE_MCE = 1, + PSTORE_TYPE_CONSOLE = 2, + PSTORE_TYPE_FTRACE = 3, + PSTORE_TYPE_PPC_RTAS = 4, + PSTORE_TYPE_PPC_OF = 5, + PSTORE_TYPE_PPC_COMMON = 6, + PSTORE_TYPE_PMSG = 7, + PSTORE_TYPE_PPC_OPAL = 8, + PSTORE_TYPE_MAX = 9, +}; + +enum { + Opt_kmsg_bytes = 0, + Opt_err___3 = 1, +}; + +struct pstore_record; + +struct pstore_private { + struct list_head list; + struct dentry *dentry; + struct pstore_record *record; + size_t total_size; +}; + +struct pstore_info; + +struct pstore_record { + struct pstore_info *psi; + enum pstore_type_id type; + u64 id; + struct timespec64 time; + char *buf; + ssize_t size; + ssize_t ecc_notice_size; + void *priv; + int count; + enum kmsg_dump_reason reason; + unsigned int part; + bool compressed; +}; + +struct pstore_info { + struct module *owner; + const char *name; + spinlock_t buf_lock; + char *buf; + size_t bufsize; + struct mutex read_mutex; + int flags; + int max_reason; + void *data; + int (*open)(struct pstore_info *); + int (*close)(struct pstore_info *); + ssize_t (*read)(struct pstore_record *); + int (*write)(struct pstore_record *); + int (*write_user)(struct pstore_record *, + const char __attribute__((btf_type_tag("user"))) *); + int (*erase)(struct pstore_record *); +}; + +struct pstore_ftrace_record { + unsigned long ip; + unsigned long parent_ip; + u64 ts; +}; + +struct pstore_ftrace_seq_data { + const void *ptr; + size_t off; + size_t size; +}; + +typedef unsigned char Byte; + +typedef unsigned long uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +struct internal_state { + int dummy; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +typedef s32 compat_key_t; + +typedef u32 __compat_gid32_t; + +typedef u16 compat_ushort_t; + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + compat_mode_t mode; + unsigned char __pad1[2]; + compat_ushort_t seq; + compat_ushort_t __pad2; + compat_ulong_t unused1; + compat_ulong_t unused2; +}; + +typedef unsigned int __kernel_mode_t; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned char __pad1[0]; + unsigned short seq; + unsigned short __pad2; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; + +struct compat_ipc_perm { + key_t key; + __compat_uid_t uid; + 
__compat_gid_t gid; + __compat_uid_t cuid; + __compat_gid_t cgid; + compat_mode_t mode; + unsigned short seq; +}; + +struct ipc_params; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + } u; +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int ids; + int (*show)(struct seq_file *, void *); +}; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + unsigned long q_cbytes; + unsigned long q_qnum; + unsigned long q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long : 64; + long : 64; +}; + +struct msg; + +typedef int __kernel_ipc_pid_t; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + unsigned long msg_lcbytes; + unsigned long msg_lqbytes; + unsigned short msg_cbytes; + unsigned short msg_qnum; + unsigned short msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long r_msgtype; + long r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +typedef s32 compat_ssize_t; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long msg_stime; + long msg_rtime; + long msg_ctime; + unsigned long msg_cbytes; + unsigned long msg_qnum; + unsigned long msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + unsigned long __unused4; + unsigned long __unused5; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + unsigned short msgseg; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +typedef u16 compat_ipc_pid_t; + +struct compat_msqid_ds { + struct compat_ipc_perm msg_perm; + compat_uptr_t msg_first; + compat_uptr_t msg_last; + old_time32_t msg_stime; + old_time32_t msg_rtime; + old_time32_t msg_ctime; + compat_ulong_t msg_lcbytes; + compat_ulong_t msg_lqbytes; + unsigned short msg_cbytes; + unsigned short msg_qnum; + unsigned short msg_qbytes; + compat_ipc_pid_t msg_lspid; + compat_ipc_pid_t 
msg_lrpid; +}; + +struct compat_msgbuf { + compat_long_t mtype; + char mtext[1]; +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short semadj[0]; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + time64_t sem_otime; +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct sem sems[0]; +}; + +struct sembuf; + +struct sem_queue { + struct list_head list; + struct task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sembuf { + unsigned short sem_num; + short sem_op; + short sem_flg; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + __kernel_long_t sem_otime; + __kernel_ulong_t __unused1; + __kernel_long_t sem_ctime; + __kernel_ulong_t __unused2; + __kernel_ulong_t sem_nsems; + __kernel_ulong_t __unused3; + __kernel_ulong_t __unused4; +}; + +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + unsigned short sem_nsems; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; + compat_ulong_t sem_nsems; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_semid_ds { + struct compat_ipc_perm sem_perm; + old_time32_t sem_otime; + old_time32_t sem_ctime; + compat_uptr_t sem_base; + compat_uptr_t sem_pending; + compat_uptr_t sem_pending_last; + compat_uptr_t undo; + unsigned short sem_nsems; +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + unsigned long shm_nattch; + unsigned long shm_segsz; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct ucounts *mlock_ucounts; + struct task_struct *shm_creator; + struct list_head shm_clist; + struct ipc_namespace *ns; + long : 64; + long : 64; + long : 64; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + unsigned short shm_nattch; + unsigned short shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + __kernel_size_t shm_segsz; + long shm_atime; + long shm_dtime; + long shm_ctime; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + unsigned long shm_nattch; + 
unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct compat_shmid_ds { + struct compat_ipc_perm shm_perm; + int shm_segsz; + old_time32_t shm_atime; + old_time32_t shm_dtime; + old_time32_t shm_ctime; + compat_ipc_pid_t shm_cpid; + compat_ipc_pid_t shm_lpid; + unsigned short shm_nattch; + unsigned short shm_unused; + compat_uptr_t shm_unused2; + compat_uptr_t shm_unused3; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shm_info { + compat_int_t used_ids; + compat_ulong_t shm_tot; + compat_ulong_t shm_rss; + compat_ulong_t shm_swp; + compat_ulong_t swap_attempts; + compat_ulong_t swap_successes; +}; + +struct compat_shminfo64 { + compat_ulong_t shmmax; + compat_ulong_t shmmin; + compat_ulong_t shmmni; + compat_ulong_t shmseg; + compat_ulong_t shmall; + compat_ulong_t __unused1; + compat_ulong_t __unused2; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_ipc_kludge { + compat_uptr_t msgp; + compat_long_t msgtyp; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct posix_msg_tree_node; + +struct mqueue_inode_info { + spinlock_t lock; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct ucounts *ucounts; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + unsigned long qsize; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct compat_mq_attr { + compat_long_t mq_flags; + compat_long_t mq_maxmsg; + compat_long_t mq_msgsize; + compat_long_t mq_curmsgs; + compat_long_t __reserved[4]; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +struct assoc_array_ops { + unsigned long (*get_key_chunk)(const void *, int); + unsigned long 
(*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + unsigned long index_key[0]; +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + unsigned long nr_leaves_on_branch; +}; + +struct assoc_array_edit { + struct callback_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + time64_t now; +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char __attribute__((btf_type_tag("user"))) * hashname; + char __attribute__((btf_type_tag("user"))) * otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +struct user_key_payload { + struct callback_head rcu; + unsigned short datalen; + long : 0; + char data[0]; +}; + +struct compat_keyctl_kdf_params { + compat_uptr_t hashname; + compat_uptr_t otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct crypto_kpp; + +struct kpp_request; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + struct crypto_alg base; +}; + +struct crypto_kpp { + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct dh { + const void *key; + const void *p; + const void *g; + unsigned int key_size; + unsigned int p_size; + unsigned int g_size; +}; + +enum { + 
Opt_err___4 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 rootid; +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short type; + short access; + struct list_head list; + struct callback_head rcu; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; + bool test_started; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + __CRYPTOA_MAX = 3, +}; + +struct rtattr { + unsigned short rta_len; + unsigned short rta_type; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +enum crypto_attr_type_t { + CRYPTOCFGA_UNSPEC = 0, + CRYPTOCFGA_PRIORITY_VAL = 1, + CRYPTOCFGA_REPORT_LARVAL = 2, + CRYPTOCFGA_REPORT_HASH = 3, + CRYPTOCFGA_REPORT_BLKCIPHER = 4, + CRYPTOCFGA_REPORT_AEAD = 5, + CRYPTOCFGA_REPORT_COMPRESS = 6, + CRYPTOCFGA_REPORT_RNG = 7, + CRYPTOCFGA_REPORT_CIPHER = 8, + CRYPTOCFGA_REPORT_AKCIPHER = 9, + CRYPTOCFGA_REPORT_KPP = 10, + CRYPTOCFGA_REPORT_ACOMP = 11, + CRYPTOCFGA_STAT_LARVAL = 12, + CRYPTOCFGA_STAT_HASH = 13, + CRYPTOCFGA_STAT_BLKCIPHER = 14, + CRYPTOCFGA_STAT_AEAD = 15, + CRYPTOCFGA_STAT_COMPRESS = 16, + CRYPTOCFGA_STAT_RNG = 17, + CRYPTOCFGA_STAT_CIPHER = 18, + CRYPTOCFGA_STAT_AKCIPHER = 19, + CRYPTOCFGA_STAT_KPP = 20, + CRYPTOCFGA_STAT_ACOMP = 21, + __CRYPTOCFGA_MAX = 22, +}; + +struct crypto_aead; + +struct aead_request; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + void *__ctx[0]; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_istat_aead { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_aead { + char type[64]; + char geniv[64]; + unsigned int blocksize; + unsigned int maxauthsize; + unsigned int 
ivsize; +}; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct crypto_sync_skcipher; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +struct skcipher_alg_common { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + struct crypto_alg base; +}; + +struct crypto_lskcipher; + +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *, const u8 *, unsigned int); + int (*encrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, bool); + int (*decrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, bool); + int (*init)(struct crypto_lskcipher *); + void (*exit)(struct crypto_lskcipher *); + struct skcipher_alg_common co; +}; + +struct crypto_lskcipher { + struct crypto_tfm base; +}; + +struct crypto_skcipher { + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *); + union { + struct { + char head[56]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + void *__ctx[0]; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + unsigned long offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_blkcipher { + char type[64]; + char geniv[64]; + unsigned int blocksize; + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int walksize; + union { + struct { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + struct crypto_alg base; + }; + struct skcipher_alg_common co; + }; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_istat_cipher { + atomic64_t encrypt_cnt; + 
atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct ahash_request; + +struct crypto_ahash; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); + struct hash_alg_common halg; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + void *__ctx[0]; +}; + +struct crypto_ahash { + bool using_shash; + unsigned int statesize; + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + union { + struct { + char head[96]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int flags; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_hash { + char type[64]; + unsigned int blocksize; + unsigned int digestsize; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + union { + struct { + char head[104]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_istat_hash { + atomic64_t hash_cnt; + atomic64_t hash_tlen; + atomic64_t err_cnt; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +struct crypto_akcipher { + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct akcipher_request; + +struct akcipher_alg { + int (*sign)(struct akcipher_request *); + int (*verify)(struct akcipher_request *); + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + struct crypto_alg base; +}; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + union { + struct { + char head[72]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_istat_akcipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t verify_cnt; + atomic64_t sign_cnt; + atomic64_t err_cnt; +}; + +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + 
+struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_akcipher { + char type[64]; +}; + +struct crypto_sig { + struct crypto_tfm base; +}; + +struct kpp_instance { + void (*free)(struct kpp_instance *); + union { + struct { + char head[48]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +struct crypto_istat_kpp { + atomic64_t setsecret_cnt; + atomic64_t generate_public_key_cnt; + atomic64_t compute_shared_secret_cnt; + atomic64_t err_cnt; +}; + +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_kpp { + char type[64]; +}; + +struct gcry_mpi; + +typedef struct gcry_mpi *MPI; + +struct dh_ctx { + MPI p; + MPI g; + MPI xa; +}; + +typedef unsigned long mpi_limb_t; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN = 0, + CRYPTO_KPP_SECRET_TYPE_DH = 1, + CRYPTO_KPP_SECRET_TYPE_ECDH = 2, +}; + +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; +}; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_asn1_template { + const char *name; + const u8 *data; + size_t size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct rsa_asn1_template *digest_info; +}; + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + struct akcipher_request child_req; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct crypto_istat_compress { + atomic64_t compress_cnt; + atomic64_t compress_tlen; + atomic64_t decompress_cnt; + atomic64_t decompress_tlen; + atomic64_t err_cnt; +}; + +struct crypto_report_acomp { + char type[64]; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +struct crypto_scomp; + +struct scomp_alg { + void *(*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, + unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, + unsigned int *, void *); + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct crypto_report_comp { + char type[64]; +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + 
u32 otype; + u32 omask; +}; + +struct crypto_link { + int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); +}; + +enum { + CRYPTO_MSG_BASE = 16, + CRYPTO_MSG_NEWALG = 16, + CRYPTO_MSG_DELALG = 17, + CRYPTO_MSG_UPDATEALG = 18, + CRYPTO_MSG_GETALG = 19, + CRYPTO_MSG_DELRNG = 20, + CRYPTO_MSG_GETSTAT = 21, + __CRYPTO_MSG_MAX = 22, +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + struct netlink_ext_ack *extack; + void *data; + struct module *module; + u32 min_dump_alloc; +}; + +struct crypto_user_alg { + char cru_name[64]; + char cru_driver_name[64]; + char cru_module_name[64]; + __u32 cru_type; + __u32 cru_mask; + __u32 cru_refcnt; + __u32 cru_flags; +}; + +struct crypto_report_larval { + char type[64]; +}; + +struct crypto_report_cipher { + char type[64]; + unsigned int blocksize; + unsigned int min_keysize; + unsigned int max_keysize; +}; + +struct crypto_dump_info { + struct sk_buff *in_skb; + struct sk_buff *out_skb; + u32 nlmsg_seq; + u16 nlmsg_flags; +}; + +struct hmac_ctx { + struct crypto_shash *hash; + u8 pads[0]; +}; + +struct md5_state { + u32 hash[4]; + u32 block[16]; + u64 byte_count; +}; + +struct sha1_state; + +typedef void sha1_block_fn(struct sha1_state *, const u8 *, int); + +struct sha1_state { + u32 state[5]; + u64 count; + u8 buffer[64]; +}; + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +struct sha512_state; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + unsigned int partial; + u8 buf[144]; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + struct skcipher_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC = 0, + 
CRYPTO_AUTHENC_KEYA_PARAM = 1, +}; + +struct authenc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + unsigned int authkeylen; + unsigned int enckeylen; +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct crypto_authenc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct authenc_esn_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; +}; + +struct crypto_authenc_esn_ctx { + unsigned int reqoff; + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_esn_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +struct crypto_istat_rng { + atomic64_t generate_cnt; + atomic64_t generate_tlen; + atomic64_t seed_cnt; + atomic64_t err_cnt; +}; + +struct crypto_report_rng { + char type[64]; + unsigned int seedsize; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED = 0, + DRBG_SEED_STATE_PARTIAL = 1, + DRBG_SEED_STATE_FULL = 2, +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +struct drbg_core; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char *C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request *ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + enum drbg_seed_state seeded; + unsigned long last_seed_time; + bool pr; + bool fips_primed; + unsigned char *prev; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct s { + __be32 conv; +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +typedef unsigned char u8___2; + +struct rand_data { + void *hash_state; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int flags; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + unsigned int rct_count; + unsigned int apt_cutoff; + unsigned int apt_cutoff_permanent; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int health_failure; + unsigned int apt_base_set : 1; +}; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data 
*entropy_collector; + struct crypto_shash *tfm; + struct shash_desc *sdesc; +}; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +typedef enum { + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9, +} ZSTD_strategy; + +typedef struct { + unsigned int windowLog; + unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int minMatch; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef struct { + int contentSizeFlag; + int checksumFlag; + int noDictIDFlag; +} ZSTD_frameParameters; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +typedef ZSTD_parameters zstd_parameters; + +struct ZSTD_CCtx_s; + +typedef struct ZSTD_CCtx_s ZSTD_CCtx; + +typedef ZSTD_CCtx zstd_cctx; + +struct ZSTD_DCtx_s; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +typedef ZSTD_DCtx zstd_dctx; + +struct zstd_ctx { + zstd_cctx *cctx; + zstd_dctx *dctx; + void *cwksp; + void *dwksp; +}; + +typedef enum { + ZSTDcs_created = 0, + ZSTDcs_init = 1, + ZSTDcs_ongoing = 2, + ZSTDcs_ending = 3, +} ZSTD_compressionStage_e; + +typedef enum { + ZSTD_f_zstd1 = 0, + ZSTD_f_zstd1_magicless = 1, +} ZSTD_format_e; + +typedef enum { + ZSTD_dictDefaultAttach = 0, + ZSTD_dictForceAttach = 1, + ZSTD_dictForceCopy = 2, + ZSTD_dictForceLoad = 3, +} ZSTD_dictAttachPref_e; + +typedef enum { + ZSTD_ps_auto = 0, + ZSTD_ps_enable = 1, + ZSTD_ps_disable = 2, +} ZSTD_paramSwitch_e; + +typedef uint32_t U32; + +typedef struct { + ZSTD_paramSwitch_e enableLdm; + U32 hashLog; + U32 bucketSizeLog; + U32 minMatchLength; + U32 hashRateLog; + U32 windowLog; +} ldmParams_t; + +typedef enum { + ZSTD_bm_buffered = 0, + ZSTD_bm_stable = 1, +} ZSTD_bufferMode_e; + +typedef enum { + ZSTD_sf_noBlockDelimiters = 0, + ZSTD_sf_explicitBlockDelimiters = 1, +} ZSTD_sequenceFormat_e; + +typedef void *(*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + int compressionLevel; + int forceWindow; + size_t targetCBlockSize; + int srcSizeHint; + ZSTD_dictAttachPref_e attachDictPref; + ZSTD_paramSwitch_e literalCompressionMode; + int nbWorkers; + size_t jobSize; + int overlapLog; + int rsyncable; + ldmParams_t ldmParams; + int enableDedicatedDictSearch; + ZSTD_bufferMode_e inBufferMode; + ZSTD_bufferMode_e outBufferMode; + ZSTD_sequenceFormat_e blockDelimiters; + int validateSequences; + ZSTD_paramSwitch_e useBlockSplitter; + ZSTD_paramSwitch_e useRowMatchFinder; + int deterministicRefPrefix; + ZSTD_customMem customMem; +}; + +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; + +typedef uint8_t BYTE; + +typedef enum { + ZSTD_cwksp_alloc_objects = 0, + ZSTD_cwksp_alloc_buffers = 1, + ZSTD_cwksp_alloc_aligned = 2, +} ZSTD_cwksp_alloc_phase_e; + +typedef enum { + ZSTD_cwksp_dynamic_alloc = 0, + ZSTD_cwksp_static_alloc = 1, +} ZSTD_cwksp_static_alloc_e; + +typedef struct { + void *workspace; + void *workspaceEnd; + void *objectEnd; + void *tableEnd; + void *tableValidEnd; + void *allocStart; + BYTE allocFailed; + int workspaceOversizedDuration; + 
ZSTD_cwksp_alloc_phase_e phase; + ZSTD_cwksp_static_alloc_e isStatic; +} ZSTD_cwksp; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +struct POOL_ctx_s; + +typedef struct POOL_ctx_s ZSTD_threadPool; + +typedef struct { + unsigned int offset; + unsigned int litLength; + unsigned int matchLength; + unsigned int rep; +} ZSTD_Sequence; + +typedef struct { + int collectSequences; + ZSTD_Sequence *seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; + +typedef enum { + ZSTD_llt_none = 0, + ZSTD_llt_literalLength = 1, + ZSTD_llt_matchLength = 2, +} ZSTD_longLengthType_e; + +struct seqDef_s; + +typedef struct seqDef_s seqDef; + +typedef struct { + seqDef *sequencesStart; + seqDef *sequences; + BYTE *litStart; + BYTE *lit; + BYTE *llCode; + BYTE *mlCode; + BYTE *ofCode; + size_t maxNbSeq; + size_t maxNbLit; + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; +} seqStore_t; + +typedef struct { + const BYTE *nextSrc; + const BYTE *base; + const BYTE *dictBase; + U32 dictLimit; + U32 lowLimit; + U32 nbOverflowCorrections; +} ZSTD_window_t; + +typedef struct { + U32 offset; + U32 checksum; +} ldmEntry_t; + +typedef struct { + const BYTE *split; + U32 hash; + U32 checksum; + ldmEntry_t *bucket; +} ldmMatchCandidate_t; + +typedef struct { + ZSTD_window_t window; + ldmEntry_t *hashTable; + U32 loadedDictEnd; + BYTE *bucketOffsets; + size_t splitIndices[64]; + ldmMatchCandidate_t matchCandidates[64]; +} ldmState_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq *seq; + size_t pos; + size_t posInSequence; + size_t size; + size_t capacity; +} rawSeqStore_t; + +typedef size_t HUF_CElt; + +typedef enum { + HUF_repeat_none = 0, + HUF_repeat_check = 1, + HUF_repeat_valid = 2, +} HUF_repeat; + +typedef struct { + HUF_CElt CTable[257]; + HUF_repeat repeatMode; +} ZSTD_hufCTables_t; + +typedef unsigned int FSE_CTable; + +typedef enum { + FSE_repeat_none = 0, + FSE_repeat_check = 1, + FSE_repeat_valid = 2, +} FSE_repeat; + +typedef struct { + FSE_CTable offcodeCTable[193]; + FSE_CTable matchlengthCTable[363]; + FSE_CTable litlengthCTable[329]; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_fseCTables_t; + +typedef struct { + ZSTD_hufCTables_t huf; + ZSTD_fseCTables_t fse; +} ZSTD_entropyCTables_t; + +typedef struct { + ZSTD_entropyCTables_t entropy; + U32 rep[3]; +} ZSTD_compressedBlockState_t; + +typedef uint16_t U16; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[3]; +} ZSTD_optimal_t; + +typedef enum { + zop_dynamic = 0, + zop_predef = 1, +} ZSTD_OptPrice_e; + +typedef struct { + unsigned int *litFreq; + unsigned int *litLengthFreq; + unsigned int *matchLengthFreq; + unsigned int *offCodeFreq; + ZSTD_match_t *matchTable; + ZSTD_optimal_t *priceTable; + U32 litSum; + U32 litLengthSum; + U32 matchLengthSum; + U32 offCodeSum; + U32 litSumBasePrice; + U32 litLengthSumBasePrice; + U32 matchLengthSumBasePrice; + U32 offCodeSumBasePrice; + ZSTD_OptPrice_e priceType; + const ZSTD_entropyCTables_t *symbolCosts; + ZSTD_paramSwitch_e literalCompressionMode; +} optState_t; + +struct ZSTD_matchState_t; + +typedef struct ZSTD_matchState_t ZSTD_matchState_t; + +struct ZSTD_matchState_t { + ZSTD_window_t window; + U32 loadedDictEnd; + U32 nextToUpdate; + U32 hashLog3; + U32 
rowHashLog; + U16 *tagTable; + U32 hashCache[8]; + U32 *hashTable; + U32 *hashTable3; + U32 *chainTable; + U32 forceNonContiguous; + int dedicatedDictSearch; + optState_t opt; + const ZSTD_matchState_t *dictMatchState; + ZSTD_compressionParameters cParams; + const rawSeqStore_t *ldmSeqStore; +}; + +typedef struct { + ZSTD_compressedBlockState_t *prevCBlock; + ZSTD_compressedBlockState_t *nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef enum { + ZSTDb_not_buffered = 0, + ZSTDb_buffered = 1, +} ZSTD_buffered_policy_e; + +typedef enum { + zcss_init = 0, + zcss_load = 1, + zcss_flush = 2, +} ZSTD_cStreamStage; + +struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +typedef enum { + ZSTD_dct_auto = 0, + ZSTD_dct_rawContent = 1, + ZSTD_dct_fullDict = 2, +} ZSTD_dictContentType_e; + +struct ZSTD_CDict_s; + +typedef struct ZSTD_CDict_s ZSTD_CDict; + +typedef struct { + void *dictBuffer; + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; + ZSTD_CDict *cdict; +} ZSTD_localDict; + +struct ZSTD_prefixDict_s { + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +}; + +typedef struct ZSTD_prefixDict_s ZSTD_prefixDict; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +typedef struct { + symbolEncodingType_e hType; + BYTE hufDesBuffer[128]; + size_t hufDesSize; +} ZSTD_hufCTablesMetadata_t; + +typedef struct { + symbolEncodingType_e llType; + symbolEncodingType_e ofType; + symbolEncodingType_e mlType; + BYTE fseTablesBuffer[133]; + size_t fseTablesSize; + size_t lastCountSize; +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + U32 partitions[196]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + +struct ZSTD_CCtx_s { + ZSTD_compressionStage_e stage; + int cParamsChanged; + int bmi2; + ZSTD_CCtx_params requestedParams; + ZSTD_CCtx_params appliedParams; + ZSTD_CCtx_params simpleApiParams; + U32 dictID; + size_t dictContentSize; + ZSTD_cwksp workspace; + size_t blockSize; + unsigned long long pledgedSrcSizePlusOne; + unsigned long long consumedSrcSize; + unsigned long long producedCSize; + struct xxh64_state xxhState; + ZSTD_customMem customMem; + ZSTD_threadPool *pool; + size_t staticSize; + SeqCollector seqCollector; + int isFirstBlock; + int initialized; + seqStore_t seqStore; + ldmState_t ldmState; + rawSeq *ldmSequences; + size_t maxNbLdmSequences; + rawSeqStore_t externSeqStore; + ZSTD_blockState_t blockState; + U32 *entropyWorkspace; + ZSTD_buffered_policy_e bufferedPolicy; + char *inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + char *outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage streamStage; + U32 frameEnded; + ZSTD_inBuffer expectedInBuffer; + size_t expectedOutBufferSize; + ZSTD_localDict localDict; + const ZSTD_CDict *cdict; + ZSTD_prefixDict prefixDict; + ZSTD_blockSplitCtx blockSplitCtx; +}; + +typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; +} ZSTD_seqSymbol; + +typedef U32 HUF_DTable; + +typedef struct { + 
ZSTD_seqSymbol LLTable[513]; + ZSTD_seqSymbol OFTable[257]; + ZSTD_seqSymbol MLTable[513]; + HUF_DTable hufTable[4097]; + U32 rep[3]; + U32 workspace[157]; +} ZSTD_entropyDTables_t; + +typedef enum { + ZSTD_frame = 0, + ZSTD_skippableFrame = 1, +} ZSTD_frameType_e; + +typedef struct { + unsigned long long frameContentSize; + unsigned long long windowSize; + unsigned int blockSizeMax; + ZSTD_frameType_e frameType; + unsigned int headerSize; + unsigned int dictID; + unsigned int checksumFlag; +} ZSTD_frameHeader; + +typedef uint64_t U64; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +typedef enum { + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1, +} ZSTD_forceIgnoreChecksum_e; + +typedef enum { + ZSTD_use_indefinitely = -1, + ZSTD_dont_use = 0, + ZSTD_use_once = 1, +} ZSTD_dictUses_e; + +struct ZSTD_DDict_s; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef struct { + const ZSTD_DDict **ddictPtrTable; + size_t ddictPtrTableSize; + size_t ddictPtrCount; +} ZSTD_DDictHashSet; + +typedef enum { + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1, +} ZSTD_refMultipleDDicts_e; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +typedef enum { + ZSTD_not_in_dst = 0, + ZSTD_in_dst = 1, + ZSTD_split = 2, +} ZSTD_litLocation_e; + +struct ZSTD_DCtx_s { + const ZSTD_seqSymbol *LLTptr; + const ZSTD_seqSymbol *MLTptr; + const ZSTD_seqSymbol *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyDTables_t entropy; + U32 workspace[640]; + const void *previousDstEnd; + const void *prefixStart; + const void *virtualStart; + const void *dictEnd; + size_t expected; + ZSTD_frameHeader fParams; + U64 processedCSize; + U64 decodedSize; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + ZSTD_format_e format; + ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; + U32 validateChecksum; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + U32 dictID; + int ddictIsCold; + ZSTD_dictUses_e dictUses; + ZSTD_DDictHashSet *ddictSet; + ZSTD_refMultipleDDicts_e refMultipleDDicts; + ZSTD_dStreamStage streamStage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + U32 hostageByte; + int noForwardProgress; + ZSTD_bufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; + BYTE *litBuffer; + const BYTE *litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[65568]; + BYTE headerBuffer[18]; + size_t oversizedDuration; +}; + +typedef ZSTD_compressionParameters zstd_compression_parameters; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct 
key_preparsed_payload *); +}; + +struct asymmetric_key_ids { + void *id[3]; +}; + +struct asymmetric_key_id { + unsigned short len; + unsigned char data[0]; +}; + +struct public_key_signature; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + unsigned short name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +struct public_key_signature { + struct asymmetric_key_id *auth_ids[3]; + u8 *s; + u8 *digest; + u32 s_size; + u32 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; +}; + +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; + unsigned long key_eflags; +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_sig; + bool blacklisted; +}; + +struct x509_parse_context { + struct x509_certificate *cert; + unsigned long data; + const void *key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID sig_algo; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +enum blacklist_hash_type { + BLACKLIST_HASH_X509_TBS = 1, + BLACKLIST_HASH_BINARY = 2, +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + unsigned long aa_set; + time64_t signing_time; + struct public_key_signature *sig; +}; + +struct pkcs7_parse_context { + struct pkcs7_message *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct 
x509_certificate **ppcerts; + unsigned long data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO_SHA3_256 = 20, + HASH_ALGO_SHA3_384 = 21, + HASH_ALGO_SHA3_512 = 22, + HASH_ALGO__LAST = 23, +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + unsigned char tuple_size; + const char *disk_name; +}; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + DISK_EVENT_EJECT_REQUEST = 2, +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +enum { + DIO_SHOULD_DIRTY = 1, + DIO_IS_SYNC = 2, +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + unsigned int flags; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct bio bio; +}; + +enum rq_qos_id { + RQ_QOS_WBT = 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + const struct rq_qos_ops *ops; + struct gendisk *disk; + enum rq_qos_id id; + struct rq_qos *next; + struct dentry *debugfs_dir; +}; + +struct blk_mq_debugfs_attr; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +struct blk_mq_debugfs_attr { + const char *name; + umode_t mode; + int (*show)(void *, struct seq_file *); + ssize_t (*write)(void *, const char __attribute__((btf_type_tag("user"))) *, + size_t, loff_t *); + const struct seq_operations *seq_ops; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __attribute__((btf_type_tag("percpu"))) * queue_ctx; +}; + +struct elevator_type; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + unsigned long flags; + struct hlist_head hash[64]; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE 
= 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +typedef unsigned int blk_insert_t; + +struct blk_mq_alloc_data; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); + struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request *(*former_request)(struct request_queue *, struct request *); + struct request *(*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + const unsigned int elevator_features; + struct module *elevator_owner; + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + char icq_cache_name[22]; + struct list_head list; +}; + +typedef __u32 blk_mq_req_flags_t; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + unsigned int nr_tags; + struct request **cached_rq; + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 32, + BLK_MQ_F_NO_SCHED = 64, + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 128, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_MAX_DEPTH = 10240, + BLK_MQ_CPU_WORK_BATCH = 8, +}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, + unsigned int); + +typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void *, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request *); + +typedef void 
(*btf_trace_block_rq_merge)(void *, struct request *); + +typedef void (*btf_trace_block_io_start)(void *, struct request *); + +typedef void (*btf_trace_block_io_done)(void *, struct request *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_backmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct bio *); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + sector_t sector; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq_completion { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + unsigned int nr_bios; + 
char rwbs[8]; + char __data[0]; +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + unsigned long first_pending_disptime; + struct timer_list pending_timer; +}; + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + unsigned long disptime; + unsigned int flags; + bool has_rules_bps[2]; + bool has_rules_iops[2]; + uint64_t bps[4]; + uint64_t bps_conf[4]; + unsigned int iops[4]; + unsigned int iops_conf[4]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + unsigned long last_low_overflow_time[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long long carryover_bytes[2]; + int carryover_ios[2]; + unsigned long last_check_time; + unsigned long latency_target; + unsigned long latency_target_conf; + unsigned long slice_start[2]; + unsigned long slice_end[2]; + unsigned long last_finish_time; + unsigned long checked_last_finish_time; + unsigned long avg_idletime; + unsigned long idletime_threshold; + unsigned long idletime_threshold_conf; + unsigned int bio_cnt; + unsigned int bad_bio_cnt; + unsigned long bio_cnt_reset_time; + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data * +blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; +}; + +struct trace_event_data_offsets_block_rq_completion { + u32 cmd; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; +}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct request_queue *, char *); + ssize_t (*store)(struct 
request_queue *, const char *, size_t); +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 2, + REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 5000, +}; + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, + IOPRIO_CLASS_INVALID = 7, +}; + +enum { + IOPRIO_HINT_NONE = 0, + IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, + IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, + IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, + IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, + IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, + IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, + IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +struct rq_map_data { + struct page **pages; + unsigned long offset; + unsigned short page_order; + unsigned short nr_entries; + bool null_mapped; + bool from_user; +}; + +struct bio_map_data { + bool is_our_pages : 1; + bool is_null_mapped : 1; + struct iov_iter iter; + struct iovec iov[0]; +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +typedef bool busy_tag_iter_fn(struct request *, void *); + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +struct blk_expired_data { + bool has_timedout_rq; + unsigned long next; + unsigned long timeout_start; +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct mq_inflight { + struct block_device *part; + unsigned int inflight[2]; +}; + +struct blk_rq_wait { + struct completion done; + blk_status_t ret; +}; + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + busy_tag_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +struct blk_rq_stat; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat __attribute__((btf_type_tag("percpu"))) * cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + int accounting; +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); +}; + +typedef 
u32 compat_caddr_t; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + unsigned long start; +}; + +struct pr_keys { + u32 generation; + u32 num_keys; + u64 keys[0]; +}; + +struct pr_held_reservation { + u64 key; + u32 generation; + enum pr_type type; +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void __attribute__((btf_type_tag("user"))) * data; +}; + +struct blkpg_partition { + long long start; + long long length; + int pno; + char devname[64]; + char volname[64]; +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct compat_hd_geometry { + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + u32 start; +}; + +struct compat_blkpg_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_caddr_t data; +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; + void (*probe)(dev_t); +}; + +enum { + GENHD_FL_REMOVABLE = 1, + GENHD_FL_HIDDEN = 2, + GENHD_FL_NO_PART = 4, +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct subsys_private; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; + struct subsys_private *sp; +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct badblocks_context { + sector_t start; + sector_t len; + int ack; +}; + +struct parsed_partitions { + struct gendisk *disk; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct folio *v; +} Sector; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +struct tocblock { + u8 bitmap1_name[16]; + u64 bitmap1_start; + u64 bitmap1_size; + u8 bitmap2_name[16]; + u64 bitmap2_start; + u64 bitmap2_size; +}; + +struct vblk_comp { + u8 state[16]; + u64 parent_id; + u8 type; + u8 children; + u16 chunksize; +}; + +struct vblk_dgrp { + u8 disk_id[64]; +}; + +struct vblk_disk { + uuid_t disk_id; + u8 alt_name[128]; +}; + +struct vblk_part { + u64 start; + u64 size; + u64 volume_offset; + u64 parent_id; + u64 disk_id; + u8 partnum; +}; + +struct vblk_volu { + u8 volume_type[16]; + u8 volume_state[16]; + u8 guid[16]; + u8 drive_hint[4]; + u64 size; + u8 partition_type; +}; + +struct vblk { + u8 name[64]; + u64 obj_id; + u32 sequence; + u8 flags; + u8 type; + union { + struct vblk_comp comp; + struct vblk_dgrp dgrp; + struct vblk_disk disk; + struct vblk_part part; + struct vblk_volu volu; + } vblk; + struct list_head list; +}; + +struct frag { + struct list_head list; + u32 group; + u8 num; + u8 rec; + u8 map; + u8 data[0]; +}; + +struct privhead { + u16 ver_major; + u16 ver_minor; + u64 logical_disk_start; + u64 logical_disk_size; + u64 config_start; + u64 config_size; + uuid_t disk_id; +}; + +struct 
vmdb { + u16 ver_major; + u16 ver_minor; + u32 vblk_size; + u32 vblk_offset; + u32 last_vblk_seq; +}; + +struct ldmdb { + struct privhead ph; + struct tocblock toc; + struct vmdb vm; + struct list_head v_dgrp; + struct list_head v_disk; + struct list_head v_volu; + struct list_head v_comp; + struct list_head v_part; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +} __attribute__((packed)); + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function : 1; + u64 reserved : 47; + u64 type_guid_specific : 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct rq_wait; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + unsigned int queue_depth; + unsigned int default_depth; +}; + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct disk_events { + struct list_head node; + struct gendisk 
*disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long poll_msecs; + struct delayed_work dwork; +}; + +struct blk_ia_range_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_independent_access_range *, char *); +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + +struct blkg_conf_ctx { + char *input; + char *body; + struct block_device *bdev; + struct blkcg_gq *blkg; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +struct latency_bucket { + unsigned long total_latency; + int samples; +}; + +struct avg_latency_bucket { + unsigned long latency; + bool valid; +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + unsigned int limit_index; + bool limit_valid[2]; + unsigned long low_upgrade_time; + unsigned long low_downgrade_time; + unsigned int scale; + struct latency_bucket tmp_buckets[18]; + struct avg_latency_bucket avg_buckets[18]; + struct latency_bucket __attribute__((btf_type_tag("percpu"))) * latency_buckets[2]; + unsigned long last_calculate_time; + unsigned long filtered_latency; + bool track_bio_latency; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, + THROTL_TG_CANCELING = 4, +}; + +enum { + LIMIT_LOW = 0, + LIMIT_MAX = 1, + LIMIT_CNT = 2, +}; + +enum prio_policy { + POLICY_NO_CHANGE = 0, + POLICY_PROMOTE_TO_RT = 1, + POLICY_RESTRICT_TO_BE = 2, + POLICY_ALL_TO_IDLE = 3, + POLICY_NONE_TO_RT = 4, +}; + +struct ioprio_blkcg { + struct blkcg_policy_data cpd; + enum prio_policy prio_policy; +}; + +struct ioprio_blkg { + struct blkg_policy_data pd; +}; + +struct percentile_stats { + u64 total; + u64 missed; +}; + +struct latency_stat { + union { + struct percentile_stats ps; + struct blk_rq_stat rqs; + }; +}; + +struct iolatency_grp; + +struct child_latency_info { + spinlock_t lock; + u64 last_scale_event; + u64 scale_lat; + u64 nr_samples; + struct iolatency_grp *scale_grp; + atomic_t scale_cookie; +}; + +struct blk_iolatency; + +struct iolatency_grp { + struct blkg_policy_data pd; + struct latency_stat __attribute__((btf_type_tag("percpu"))) * stats; + struct latency_stat cur_stat; + struct blk_iolatency *blkiolat; + unsigned int max_depth; + struct rq_wait rq_wait; + atomic64_t window_start; + atomic_t scale_cookie; + u64 min_lat_nsec; + u64 cur_win_nsec; + u64 lat_avg; + u64 nr_samples; + bool ssd; + struct child_latency_info child_lat; +}; + +struct blk_iolatency { + struct rq_qos rqos; + struct timer_list timer; + bool enabled; + atomic_t enable_cnt; + struct work_struct enable_work; +}; + +struct ioc_gq; + +struct ioc_now; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, + struct ioc_now *, u64, u64, u64); + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc; + +struct iocg_pcpu_stat; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 inuse; + u32 last_inuse; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + 
int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat __attribute__((btf_type_tag("percpu"))) * pcpu_stat; + struct iocg_stat stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +struct ioc_pcpu_stat; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat __attribute__((btf_type_tag("percpu"))) * pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t hweight_gen; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params : 1; + bool user_cost_model : 1; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; +}; + +typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, + struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, + struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, + struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, + struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); + +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, + struct ioc_now *, u32, u64, u64, u64, u64); + +enum { + MILLION = 1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 6, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + VTIME_PER_SEC_SHIFT = 37ULL, + VTIME_PER_SEC = 137438953472ULL, + VTIME_PER_USEC = 137438ULL, + VTIME_PER_NSEC = 137ULL, + VRATE_MIN_PPM = 10000ULL, + VRATE_MAX_PPM = 100000000ULL, + VRATE_MIN = 1374ULL, + VRATE_CLAMP_ADJ_PCT = 4ULL, + AUTOP_CYCLE_NSEC = 10000000000ULL, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + 
+enum { + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100000, + MAX_LAGGING_PERIODS = 10, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +struct trace_event_raw_iocost_iocg_state { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; +}; + +struct trace_event_data_offsets_iocost_iocg_state { + u32 devname; + u32 cgroup; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + u32 cgroup; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + u32 cgroup; +}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + u32 hw_inuse; + s64 vbudget; +}; + +enum dd_prio { + DD_RT_PRIO = 0, + DD_BE_PRIO = 1, + DD_IDLE_PRIO = 2, + DD_PRIO_MAX = 2, +}; + +enum dd_data_dir { + DD_READ = 0, + DD_WRITE = 1, +}; + +enum { + DD_DISPATCHING = 0, +}; + +struct io_stats_per_prio { + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; +}; + +struct dd_per_prio { + struct list_head dispatch; + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + sector_t latest_pos[2]; + struct io_stats_per_prio stats; +}; + +struct deadline_data { + struct { + spinlock_t lock; + spinlock_t insert_lock; + spinlock_t zone_lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + }; + unsigned long run_state; + struct list_head at_head; + struct list_head at_tail; + struct dd_per_prio per_prio[3]; + enum dd_data_dir last_dir; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + u32 async_depth; + int prio_aging_expire; + long : 64; + long : 64; +}; + +typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char 
*, unsigned int, + unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); + +typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); + +enum { + KYBER_READ = 0, + KYBER_WRITE = 1, + KYBER_DISCARD = 2, + KYBER_OTHER = 3, + KYBER_NUM_DOMAINS = 4, +}; + +enum { + KYBER_TOTAL_LATENCY = 0, + KYBER_IO_LATENCY = 1, +}; + +enum { + KYBER_LATENCY_SHIFT = 2, + KYBER_GOOD_BUCKETS = 4, + KYBER_LATENCY_BUCKETS = 8, +}; + +enum { + KYBER_ASYNC_PERCENT = 75, +}; + +struct trace_event_raw_kyber_latency { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char type[8]; + u8 percentile; + u8 numerator; + u8 denominator; + unsigned int samples; + char __data[0]; +}; + +struct trace_event_raw_kyber_adjust { + struct trace_entry ent; + dev_t dev; + char domain[16]; + unsigned int depth; + char __data[0]; +}; + +struct trace_event_raw_kyber_throttled { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char __data[0]; +}; + +struct kyber_cpu_latency { + atomic_t buckets[48]; +}; + +struct kyber_queue_data { + struct request_queue *q; + dev_t dev; + struct sbitmap_queue domain_tokens[4]; + unsigned int async_depth; + struct kyber_cpu_latency __attribute__((btf_type_tag("percpu"))) * cpu_latency; + struct timer_list timer; + unsigned int latency_buckets[48]; + unsigned long latency_timeout[3]; + int domain_p99[3]; + u64 latency_targets[3]; +}; + +struct kyber_ctx_queue { + spinlock_t lock; + struct list_head rq_list[4]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct kyber_hctx_data { + spinlock_t lock; + struct list_head rqs[4]; + unsigned int cur_domain; + unsigned int batching; + struct kyber_ctx_queue *kcqs; + struct sbitmap kcq_map[4]; + struct sbq_wait domain_wait[4]; + struct sbq_wait_state *domain_ws[4]; + atomic_t wait_index[4]; +}; + +struct flush_kcq_data { + struct kyber_hctx_data *khd; + unsigned int sched_domain; + struct list_head *list; +}; + +struct trace_event_data_offsets_kyber_latency {}; + +struct trace_event_data_offsets_kyber_adjust {}; + +struct trace_event_data_offsets_kyber_throttled {}; + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, +}; + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1, + BLK_INTEGRITY_GENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_IP_CHECKSUM = 8, +}; + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_enabled; + bool config_change_pending; + spinlock_t config_lock; + spinlock_t vqs_list_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; +}; + +struct virtqueue; + +typedef void vq_callback_t(struct virtqueue *); + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, + vq_callback_t **, 
const char *const *, const bool *, + struct irq_affinity *); + void (*del_vqs)(struct virtio_device *); + void (*synchronize_cbs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char *(*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask *(*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); + int (*disable_vq_and_reset)(struct virtqueue *); + int (*enable_vq_after_reset)(struct virtqueue *); +}; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + unsigned int num_max; + bool reset; + void *priv; +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, unsigned long); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, + unsigned long, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, + unsigned int); + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, + WBT_STATE_OFF_DEFAULT = 3, + WBT_STATE_OFF_MANUAL = 4, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_KSWAPD = 1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_KSWAPD = 4, + WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + unsigned long lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + unsigned long window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + u64 sync_issue; + void *sync_cookie; + unsigned int wc; + unsigned long last_issue; + unsigned long last_comp; + unsigned long min_lat_nsec; + struct rq_qos rqos; + struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + blk_opf_t opf; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +struct show_busy_params { + struct seq_file *m; + struct blk_mq_hw_ctx *hctx; +}; + +enum opal_response_token { + OPAL_DTA_TOKENID_BYTESTRING = 224, + OPAL_DTA_TOKENID_SINT 
= 225, + OPAL_DTA_TOKENID_UINT = 226, + OPAL_DTA_TOKENID_TOKEN = 227, + OPAL_DTA_TOKENID_INVALID = 0, +}; + +enum opal_atom_width { + OPAL_WIDTH_TINY = 0, + OPAL_WIDTH_SHORT = 1, + OPAL_WIDTH_MEDIUM = 2, + OPAL_WIDTH_LONG = 3, + OPAL_WIDTH_TOKEN = 4, +}; + +enum { + TCG_SECP_00 = 0, + TCG_SECP_01 = 1, +}; + +enum opal_user { + OPAL_ADMIN1 = 0, + OPAL_USER1 = 1, + OPAL_USER2 = 2, + OPAL_USER3 = 3, + OPAL_USER4 = 4, + OPAL_USER5 = 5, + OPAL_USER6 = 6, + OPAL_USER7 = 7, + OPAL_USER8 = 8, + OPAL_USER9 = 9, +}; + +enum opal_uid { + OPAL_SMUID_UID = 0, + OPAL_THISSP_UID = 1, + OPAL_ADMINSP_UID = 2, + OPAL_LOCKINGSP_UID = 3, + OPAL_ENTERPRISE_LOCKINGSP_UID = 4, + OPAL_ANYBODY_UID = 5, + OPAL_SID_UID = 6, + OPAL_ADMIN1_UID = 7, + OPAL_USER1_UID = 8, + OPAL_USER2_UID = 9, + OPAL_PSID_UID = 10, + OPAL_ENTERPRISE_BANDMASTER0_UID = 11, + OPAL_ENTERPRISE_ERASEMASTER_UID = 12, + OPAL_TABLE_TABLE = 13, + OPAL_LOCKINGRANGE_GLOBAL = 14, + OPAL_LOCKINGRANGE_ACE_START_TO_KEY = 15, + OPAL_LOCKINGRANGE_ACE_RDLOCKED = 16, + OPAL_LOCKINGRANGE_ACE_WRLOCKED = 17, + OPAL_MBRCONTROL = 18, + OPAL_MBR = 19, + OPAL_AUTHORITY_TABLE = 20, + OPAL_C_PIN_TABLE = 21, + OPAL_LOCKING_INFO_TABLE = 22, + OPAL_ENTERPRISE_LOCKING_INFO_TABLE = 23, + OPAL_DATASTORE = 24, + OPAL_C_PIN_MSID = 25, + OPAL_C_PIN_SID = 26, + OPAL_C_PIN_ADMIN1 = 27, + OPAL_HALF_UID_AUTHORITY_OBJ_REF = 28, + OPAL_HALF_UID_BOOLEAN_ACE = 29, + OPAL_UID_HEXFF = 30, +}; + +enum opal_method { + OPAL_PROPERTIES = 0, + OPAL_STARTSESSION = 1, + OPAL_REVERT = 2, + OPAL_ACTIVATE = 3, + OPAL_EGET = 4, + OPAL_ESET = 5, + OPAL_NEXT = 6, + OPAL_EAUTHENTICATE = 7, + OPAL_GETACL = 8, + OPAL_GENKEY = 9, + OPAL_REVERTSP = 10, + OPAL_GET = 11, + OPAL_SET = 12, + OPAL_AUTHENTICATE = 13, + OPAL_RANDOM = 14, + OPAL_ERASE = 15, +}; + +enum opal_token { + OPAL_TRUE = 1, + OPAL_FALSE = 0, + OPAL_BOOLEAN_EXPR = 3, + OPAL_TABLE = 0, + OPAL_STARTROW = 1, + OPAL_ENDROW = 2, + OPAL_STARTCOLUMN = 3, + OPAL_ENDCOLUMN = 4, + OPAL_VALUES = 1, + OPAL_TABLE_UID = 0, + OPAL_TABLE_NAME = 1, + OPAL_TABLE_COMMON = 2, + OPAL_TABLE_TEMPLATE = 3, + OPAL_TABLE_KIND = 4, + OPAL_TABLE_COLUMN = 5, + OPAL_TABLE_COLUMNS = 6, + OPAL_TABLE_ROWS = 7, + OPAL_TABLE_ROWS_FREE = 8, + OPAL_TABLE_ROW_BYTES = 9, + OPAL_TABLE_LASTID = 10, + OPAL_TABLE_MIN = 11, + OPAL_TABLE_MAX = 12, + OPAL_PIN = 3, + OPAL_RANGESTART = 3, + OPAL_RANGELENGTH = 4, + OPAL_READLOCKENABLED = 5, + OPAL_WRITELOCKENABLED = 6, + OPAL_READLOCKED = 7, + OPAL_WRITELOCKED = 8, + OPAL_ACTIVEKEY = 10, + OPAL_LIFECYCLE = 6, + OPAL_MAXRANGES = 4, + OPAL_MBRENABLE = 1, + OPAL_MBRDONE = 2, + OPAL_HOSTPROPERTIES = 0, + OPAL_STARTLIST = 240, + OPAL_ENDLIST = 241, + OPAL_STARTNAME = 242, + OPAL_ENDNAME = 243, + OPAL_CALL = 248, + OPAL_ENDOFDATA = 249, + OPAL_ENDOFSESSION = 250, + OPAL_STARTTRANSACTON = 251, + OPAL_ENDTRANSACTON = 252, + OPAL_EMPTYATOM = 255, + OPAL_WHERE = 0, +}; + +enum opal_lock_state { + OPAL_RO = 1, + OPAL_RW = 2, + OPAL_LK = 4, +}; + +enum opal_lock_flags { + OPAL_SAVE_FOR_LOCK = 1, +}; + +enum opal_key_type { + OPAL_INCLUDED = 0, + OPAL_KEYRING = 1, +}; + +enum opal_parameter { + OPAL_SUM_SET_LIST = 393216, +}; + +enum opal_mbr { + OPAL_MBR_ENABLE = 0, + OPAL_MBR_DISABLE = 1, +}; + +enum opal_mbr_done_flag { + OPAL_MBR_NOT_DONE = 0, + OPAL_MBR_DONE = 1, +}; + +enum opal_table_ops { + OPAL_READ_TABLE = 0, + OPAL_WRITE_TABLE = 1, +}; + +enum opal_revertlsp { + OPAL_KEEP_GLOBAL_RANGE_KEY = 393216, +}; + +enum opal_revert_lsp_opts { + OPAL_PRESERVE = 1, +}; + +struct opal_key { + __u8 lr; + __u8 key_len; + __u8 key_type; + __u8 
__align[5]; + __u8 key[256]; +}; + +struct opal_session_info { + __u32 sum; + __u32 who; + struct opal_key opal_key; +}; + +struct opal_lock_unlock { + struct opal_session_info session; + __u32 l_state; + __u16 flags; + __u8 __align[2]; +}; + +struct opal_suspend_data { + struct opal_lock_unlock unlk; + u8 lr; + struct list_head node; +}; + +struct d0_header { + __be32 length; + __be32 revision; + __be32 reserved01; + __be32 reserved02; + u8 ignored[32]; +}; + +struct d0_features { + __be16 code; + u8 r_version; + u8 length; + u8 features[0]; +}; + +struct opal_compacket { + __be32 reserved0; + u8 extendedComID[4]; + __be32 outstandingData; + __be32 minTransfer; + __be32 length; +}; + +struct opal_packet { + __be32 tsn; + __be32 hsn; + __be32 seq_number; + __be16 reserved0; + __be16 ack_type; + __be32 acknowledgment; + __be32 length; +}; + +struct opal_data_subpacket { + u8 reserved0[6]; + __be16 kind; + __be32 length; +}; + +struct opal_header { + struct opal_compacket cp; + struct opal_packet pkt; + struct opal_data_subpacket subpkt; +}; + +typedef int sec_send_recv(void *, u16, u8, void *, size_t, bool); + +struct opal_resp_tok { + const u8 *pos; + size_t len; + enum opal_response_token type; + enum opal_atom_width width; + union { + u64 u; + s64 s; + } stored; +}; + +struct parsed_resp { + int num; + struct opal_resp_tok toks[64]; +}; + +struct opal_dev { + u32 flags; + void *data; + sec_send_recv *send_recv; + struct mutex dev_lock; + u16 comid; + u32 hsn; + u32 tsn; + u64 align; + u64 lowest_lba; + u32 logical_block_size; + u8 align_required; + size_t pos; + u8 *cmd; + u8 *resp; + struct parsed_resp parsed; + size_t prev_d_len; + void *prev_data; + struct list_head unlk_lst; +}; + +struct opal_step { + int (*fn)(struct opal_dev *, void *); + void *data; +}; + +typedef unsigned char u_char; + +struct opal_read_write_table { + struct opal_key key; + const __u64 data; + const __u8 table_uid[8]; + __u64 offset; + __u64 size; + __u64 flags; + __u64 priv; +}; + +struct opal_discovery { + __u64 data; + __u64 size; +}; + +struct d0_locking_features { + u8 supported_features; + u8 reserved01[3]; + __be32 reserved02; + __be32 reserved03; +}; + +struct d0_single_user_mode { + __be32 num_locking_objects; + u8 reserved01; + u8 reserved02; + __be16 reserved03; + __be32 reserved04; +}; + +struct d0_tper_features { + u8 supported_features; + u8 reserved01[3]; + __be32 reserved02; + __be32 reserved03; +}; + +struct d0_geometry_features { + u8 header[4]; + u8 reserved01; + u8 reserved02[7]; + __be32 logical_block_size; + __be64 alignment_granularity; + __be64 lowest_aligned_lba; +}; + +typedef int cont_fn(struct opal_dev *); + +struct opal_user_lr_setup { + __u64 range_start; + __u64 range_length; + __u32 RLE; + __u32 WLE; + struct opal_session_info session; +}; + +struct opal_lr_act { + struct opal_key key; + __u32 sum; + __u8 num_lrs; + __u8 lr[9]; + __u8 align[2]; +}; + +struct opal_new_pw { + struct opal_session_info session; + struct opal_session_info new_user_pw; +}; + +struct opal_mbr_data { + struct opal_key key; + __u8 enable_disable; + __u8 __align[7]; +}; + +struct opal_mbr_done { + struct opal_key key; + __u8 done_flag; + __u8 __align[7]; +}; + +struct opal_shadow_mbr { + struct opal_key key; + const __u64 data; + __u64 offset; + __u64 size; +}; + +struct opal_status { + __u32 flags; + __u32 reserved; +}; + +struct opal_lr_status { + struct opal_session_info session; + __u64 range_start; + __u64 range_length; + __u32 RLE; + __u32 WLE; + __u32 l_state; + __u8 align[4]; +}; + +struct 
opal_geometry { + __u8 align; + __u32 logical_block_size; + __u64 alignment_granularity; + __u64 lowest_aligned_lba; + __u8 __align[3]; +}; + +struct opal_revert_lsp { + struct opal_key key; + __u32 options; + __u32 __pad; +}; + +struct bd_holder_disk { + struct list_head list; + struct kobject *holder_dir; + int refcnt; +}; + +struct io_cache_entry { + struct io_wq_work_node node; +}; + +struct io_rsrc_put { + u64 tag; + union { + void *rsrc; + struct file *file; + struct io_mapped_ubuf *buf; + }; +}; + +struct io_rsrc_node { + union { + struct io_cache_entry cache; + struct io_ring_ctx *ctx; + }; + int refs; + bool empty; + u16 type; + struct list_head node; + struct io_rsrc_put item; +}; + +struct io_mapped_ubuf { + u64 ubuf; + u64 ubuf_end; + unsigned int nr_bvecs; + unsigned long acct_pages; + struct bio_vec bvec[0]; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __u32 len; + __u16 bid; + __u16 bgid; +}; + +struct io_uring_buf_ring; + +struct io_buffer_list { + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + struct callback_head rcu; + }; + __u16 bgid; + __u16 buf_nr_pages; + __u16 nr_entries; + __u16 head; + __u16 mask; + __u8 is_mapped; + __u8 is_mmap; + __u8 is_ready; +}; + +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + union { + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct { + struct { + } __empty_bufs; + struct io_uring_buf bufs[0]; + }; + }; +}; + +struct io_poll { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + int retries; + struct wait_queue_entry wait; +}; + +struct async_poll { + union { + struct io_poll poll; + struct io_cache_entry cache; + }; + struct io_poll *double_poll; +}; + +struct io_sq_data { + refcount_t refs; + atomic_t park_pending; + struct mutex lock; + struct list_head ctx_list; + struct task_struct *thread; + struct wait_queue_head wait; + unsigned int sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + unsigned long state; + struct completion exited; +}; + +struct io_rsrc_data { + struct io_ring_ctx *ctx; + u64 **tags; + unsigned int nr; + u16 rsrc_type; + bool quiesce; +}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, + unsigned int, long); + +typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_complete)(void *, void *, void *, u64, int, + unsigned int, u64, u64); + +typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, + struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, unsigned long long, s32, + u32, void *); + 
+typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int, unsigned int); + +typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); + +typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); + +struct creds; + +enum { + REQ_F_FIXED_FILE = 1, + REQ_F_IO_DRAIN = 2, + REQ_F_LINK = 4, + REQ_F_HARDLINK = 8, + REQ_F_FORCE_ASYNC = 16, + REQ_F_BUFFER_SELECT = 32, + REQ_F_CQE_SKIP = 64, + REQ_F_FAIL = 256, + REQ_F_INFLIGHT = 512, + REQ_F_CUR_POS = 1024, + REQ_F_NOWAIT = 2048, + REQ_F_LINK_TIMEOUT = 4096, + REQ_F_NEED_CLEANUP = 8192, + REQ_F_POLLED = 16384, + REQ_F_BUFFER_SELECTED = 32768, + REQ_F_BUFFER_RING = 65536, + REQ_F_REISSUE = 131072, + REQ_F_SUPPORT_NOWAIT = 536870912, + REQ_F_ISREG = 1073741824, + REQ_F_CREDS = 262144, + REQ_F_REFCOUNT = 524288, + REQ_F_ARM_LTIMEOUT = 1048576, + REQ_F_ASYNC_DATA = 2097152, + REQ_F_SKIP_LINK_CQES = 4194304, + REQ_F_SINGLE_POLL = 8388608, + REQ_F_DOUBLE_POLL = 16777216, + REQ_F_PARTIAL_IO = 33554432, + REQ_F_APOLL_MULTISHOT = 67108864, + REQ_F_CLEAR_POLLIN = 134217728, + REQ_F_HASH_LOCKED = 268435456, + REQ_F_POLL_NO_LAZY = 2147483648, +}; + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_HASH_SHIFT = 24, +}; + +enum { + IO_CHECK_CQ_OVERFLOW_BIT = 0, + IO_CHECK_CQ_DROPPED_BIT = 1, +}; + +enum io_uring_cmd_flags { + IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + IO_URING_F_MULTISHOT = 4, + IO_URING_F_IOWQ = 8, + IO_URING_F_NONBLOCK = -2147483648, + IO_URING_F_SQE128 = 256, + IO_URING_F_CQE32 = 512, + IO_URING_F_IOPOLL = 1024, + IO_URING_F_CANCEL = 2048, + IO_URING_F_COMPAT = 4096, +}; + +enum { + IO_APOLL_OK = 0, + IO_APOLL_ABORTED = 1, + IO_APOLL_READY = 2, +}; + +enum { + IO_EVENTFD_OP_SIGNAL_BIT = 0, + IO_EVENTFD_OP_FREE_BIT = 1, +}; + +enum { + IORING_CQE_BUFFER_SHIFT = 16, +}; + +enum { + IOU_F_TWQ_LAZY_WAKE = 1, +}; + +enum { + IOU_OK = 0, + IOU_ISSUE_SKIP_COMPLETE = -529, + IOU_STOP_MULTISHOT = -125, +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_CQE_SKIP_BIT = 6, + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT = 9, + REQ_F_CUR_POS_BIT = 10, + REQ_F_NOWAIT_BIT = 11, + REQ_F_LINK_TIMEOUT_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_BUFFER_SELECTED_BIT = 15, + REQ_F_BUFFER_RING_BIT = 16, + REQ_F_REISSUE_BIT = 17, + REQ_F_CREDS_BIT = 18, + REQ_F_REFCOUNT_BIT = 19, + REQ_F_ARM_LTIMEOUT_BIT = 20, + REQ_F_ASYNC_DATA_BIT = 21, + REQ_F_SKIP_LINK_CQES_BIT = 22, + REQ_F_SINGLE_POLL_BIT = 23, + REQ_F_DOUBLE_POLL_BIT = 24, + REQ_F_PARTIAL_IO_BIT = 25, + REQ_F_APOLL_MULTISHOT_BIT = 26, + REQ_F_CLEAR_POLLIN_BIT = 27, + REQ_F_HASH_LOCKED_BIT = 28, + REQ_F_SUPPORT_NOWAIT_BIT = 29, + REQ_F_ISREG_BIT = 30, + REQ_F_POLL_NO_LAZY_BIT = 31, + __REQ_F_LAST_BIT = 32, +}; + +enum io_uring_op { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + 
IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_SHUTDOWN = 34, + IORING_OP_RENAMEAT = 35, + IORING_OP_UNLINKAT = 36, + IORING_OP_MKDIRAT = 37, + IORING_OP_SYMLINKAT = 38, + IORING_OP_LINKAT = 39, + IORING_OP_MSG_RING = 40, + IORING_OP_FSETXATTR = 41, + IORING_OP_SETXATTR = 42, + IORING_OP_FGETXATTR = 43, + IORING_OP_GETXATTR = 44, + IORING_OP_SOCKET = 45, + IORING_OP_URING_CMD = 46, + IORING_OP_SEND_ZC = 47, + IORING_OP_SENDMSG_ZC = 48, + IORING_OP_READ_MULTISHOT = 49, + IORING_OP_WAITID = 50, + IORING_OP_FUTEX_WAIT = 51, + IORING_OP_FUTEX_WAKE = 52, + IORING_OP_FUTEX_WAITV = 53, + IORING_OP_LAST = 54, +}; + +enum { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, + IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, + IOSQE_CQE_SKIP_SUCCESS_BIT = 6, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + IO_WQ_CANCEL_RUNNING = 1, + IO_WQ_CANCEL_NOTFOUND = 2, +}; + +enum { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + IORING_REGISTER_SYNC_CANCEL = 24, + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + IORING_REGISTER_LAST = 26, + IORING_REGISTER_USE_REGISTERED_RING = 2147483648, +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +enum { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + long ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int fd; + char __data[0]; +}; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + u8 opcode; + unsigned int flags; + struct io_wq_work *work; + int rw; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + unsigned long long data; + u8 opcode; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; 
+}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *ctx; + void *req; + unsigned long long user_data; + u8 opcode; + void *link; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int res; + unsigned int cflags; + u64 extra1; + u64 extra2; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_req { + struct trace_entry ent; + void *ctx; + void *req; + unsigned long long user_data; + u8 opcode; + u32 flags; + bool sq_thread; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + void *req; + unsigned long long user_data; + u8 opcode; + int mask; + int events; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + void *req; + unsigned long long user_data; + u8 opcode; + int mask; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_req_failed { + struct trace_entry ent; + void *ctx; + void *req; + unsigned long long user_data; + u8 opcode; + u8 flags; + u8 ioprio; + u64 off; + u64 addr; + u32 len; + u32 op_flags; + u16 buf_index; + u16 personality; + u32 file_index; + u64 pad1; + u64 addr3; + int error; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqe_overflow { + struct trace_entry ent; + void *ctx; + unsigned long long user_data; + s32 res; + u32 cflags; + void *ocqe; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_work_run { + struct trace_entry ent; + void *tctx; + unsigned int count; + unsigned int loops; + char __data[0]; +}; + +struct trace_event_raw_io_uring_short_write { + struct trace_entry ent; + void *ctx; + u64 fpos; + u64 wanted; + u64 got; + char __data[0]; +}; + +struct trace_event_raw_io_uring_local_work_run { + struct trace_entry ent; + void *ctx; + int count; + unsigned int loops; + char __data[0]; +}; + +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int cq_tail; + unsigned int nr_timeouts; + ktime_t timeout; +}; + +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 user_addr; +}; + +struct io_cqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 user_addr; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct io_cqring_offsets cq_off; +}; + +struct trace_event_data_offsets_io_uring_queue_async_work { + u32 op_str; +}; + +struct trace_event_data_offsets_io_uring_defer { + u32 op_str; +}; + +struct 
trace_event_data_offsets_io_uring_fail_link { + u32 op_str; +}; + +struct trace_event_data_offsets_io_uring_submit_req { + u32 op_str; +}; + +struct trace_event_data_offsets_io_uring_poll_arm { + u32 op_str; +}; + +struct trace_event_data_offsets_io_uring_task_add { + u32 op_str; +}; + +struct trace_event_data_offsets_io_uring_req_failed { + u32 op_str; +}; + +struct io_cold_def { + unsigned short async_size; + const char *name; + int (*prep_async)(struct io_kiocb *); + void (*cleanup)(struct io_kiocb *); + void (*fail)(struct io_kiocb *); +}; + +struct io_issue_def { + unsigned int needs_file : 1; + unsigned int plug : 1; + unsigned int hash_reg_file : 1; + unsigned int unbound_nonreg_file : 1; + unsigned int pollin : 1; + unsigned int pollout : 1; + unsigned int poll_exclusive : 1; + unsigned int buffer_select : 1; + unsigned int not_supported : 1; + unsigned int audit_skip : 1; + unsigned int ioprio : 1; + unsigned int iopoll : 1; + unsigned int iopoll_queue : 1; + unsigned int manual_alloc : 1; + unsigned int vectored : 1; + int (*issue)(struct io_kiocb *, unsigned int); + int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad; + __u64 ts; +}; + +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct trace_event_data_offsets_io_uring_cqe_overflow {}; + +struct trace_event_data_offsets_io_uring_task_work_run {}; + +struct trace_event_data_offsets_io_uring_short_write {}; + +struct trace_event_data_offsets_io_uring_local_work_run {}; + +struct io_task_cancel { + struct task_struct *task; + bool all; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + +struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + 
ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, + ETHTOOL_LINK_EXT_STATE_MODULE = 10, +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + enum ethtool_link_ext_substate_module module; + u32 __link_ext_substate; + }; +}; + +struct ethtool_link_ext_stats { + u64 link_down_events; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; + u32 tx_aggr_max_bytes; + u32 tx_aggr_max_frames; + u32 tx_aggr_time_usecs; +}; + +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct kernel_ethtool_ringparam { + u32 rx_buf_len; + u8 tcp_data_split; + u8 tx_push; + u8 rx_push; + u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; +}; + +enum 
ethtool_mac_stats_src { + ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, + ETHTOOL_MAC_STATS_SRC_EMAC = 1, + ETHTOOL_MAC_STATS_SRC_PMAC = 2, +}; + +struct ethtool_pause_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + }; + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + } stats; + }; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; 
+}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 rate_matching; + __u32 reserved[7]; + __u32 link_mode_masks[0]; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + unsigned long supported[2]; + unsigned long advertising[2]; + unsigned long lp_advertising[2]; + } link_modes; + u32 lanes; +}; + +struct ethtool_fec_stat { + u64 total; + u64 lanes[8]; +}; + +struct ethtool_fec_stats { + struct ethtool_fec_stat corrected_blocks; + struct ethtool_fec_stat uncorrectable_blocks; + struct ethtool_fec_stat corrected_bits; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +struct ethtool_module_eeprom { + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; +}; + +struct ethtool_eth_phy_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 SymbolErrorDuringCarrier; + }; + struct { + u64 SymbolErrorDuringCarrier; + } stats; + }; +}; + +struct ethtool_eth_mac_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + }; + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + } stats; + }; +}; + +struct ethtool_eth_ctrl_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + }; + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + } stats; + }; +}; + +struct ethtool_rmon_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + }; + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + } stats; + }; +}; + +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, +}; + +enum 
ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH = 2, +}; + +struct ethtool_module_power_mode_params { + enum ethtool_module_power_mode_policy policy; + enum ethtool_module_power_mode mode; +}; + +enum ethtool_mm_verify_status { + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, + ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, + ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, + ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, + ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, +}; + +struct ethtool_mm_state { + u32 verify_time; + u32 max_verify_time; + enum ethtool_mm_verify_status verify_status; + bool tx_enabled; + bool tx_active; + bool pmac_enabled; + bool verify_enabled; + u32 tx_min_frag_size; + u32 rx_min_frag_size; +}; + +struct ethtool_mm_cfg { + u32 verify_time; + bool verify_enabled; + bool tx_enabled; + bool pmac_enabled; + u32 tx_min_frag_size; +}; + +struct ethtool_mm_stats { + u64 MACMergeFrameAssErrorCount; + u64 MACMergeFrameSmdErrorCount; + u64 MACMergeFrameAssOkCount; + u64 MACMergeFragCountRx; + u64 MACMergeFragCountTx; + u64 MACMergeHoldCount; +}; + +struct io_xattr { + struct file *file; + struct xattr_ctx ctx; + struct filename *filename; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_link { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_splice { + struct file *file_out; + loff_t off_out; + loff_t off_in; + u64 len; + int splice_fd_in; + unsigned int flags; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct io_madvise { + struct file *file; + u64 addr; + u32 len; + u32 advice; +}; + +struct io_fadvise { + struct file *file; + u64 offset; + u32 len; + u32 advice; +}; + +struct io_open { + struct file *file; + int dfd; + u32 file_slot; + struct filename *filename; + struct open_how how; + unsigned long nofile; +}; + +struct io_close { + struct file *file; + int fd; + u32 file_slot; +}; + +enum { + SOCKET_URING_OP_SIOCINQ = 0, + SOCKET_URING_OP_SIOCOUTQ = 1, + SOCKET_URING_OP_GETSOCKOPT = 2, + SOCKET_URING_OP_SETSOCKOPT = 3, +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + struct filename *filename; + struct statx __attribute__((btf_type_tag("user"))) * buffer; +}; + +enum { + SKBFL_ZEROCOPY_ENABLE = 1, + SKBFL_SHARED_FRAG = 2, + SKBFL_PURE_ZEROCOPY = 4, + SKBFL_DONT_ORPHAN = 8, + SKBFL_MANAGED_FRAG_REFS = 16, +}; + +struct io_shutdown { + struct file *file; + int how; +}; + +struct compat_msghdr; + +struct user_msghdr; + +struct io_sr_msg { + struct file *file; + union { + struct compat_msghdr __attribute__((btf_type_tag("user"))) * umsg_compat; + struct user_msghdr __attribute__((btf_type_tag("user"))) * umsg; + void __attribute__((btf_type_tag("user"))) * buf; + }; + unsigned int len; + unsigned int done_io; + unsigned int msg_flags; + u16 flags; + u16 addr_len; + u16 buf_group; + void __attribute__((btf_type_tag("user"))) * addr; + void __attribute__((btf_type_tag("user"))) * msg_control; + struct io_kiocb 
*notif; +}; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct user_msghdr { + void __attribute__((btf_type_tag("user"))) * msg_name; + int msg_namelen; + struct iovec __attribute__((btf_type_tag("user"))) * msg_iov; + __kernel_size_t msg_iovlen; + void __attribute__((btf_type_tag("user"))) * msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + +struct io_accept { + struct file *file; + struct sockaddr __attribute__((btf_type_tag("user"))) * addr; + int __attribute__((btf_type_tag("user"))) * addr_len; + int flags; + u32 file_slot; + unsigned long nofile; +}; + +struct io_socket { + struct file *file; + int domain; + int type; + int protocol; + int flags; + u32 file_slot; + unsigned long nofile; +}; + +struct io_connect { + struct file *file; + struct sockaddr __attribute__((btf_type_tag("user"))) * addr; + int addr_len; + bool in_progress; + bool seen_econnaborted; +}; + +struct io_async_msghdr { + union { + struct iovec fast_iov[8]; + struct { + struct iovec fast_iov_one; + __kernel_size_t controllen; + int namelen; + __kernel_size_t payloadlen; + }; + struct io_cache_entry cache; + }; + struct iovec *free_iov; + struct sockaddr __attribute__((btf_type_tag("user"))) * uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_notif_data { + struct file *file; + struct ubuf_info uarg; + unsigned long account_pages; + bool zc_report; + bool zc_used; + bool zc_copied; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + +struct io_recvmsg_multishot_hdr { + struct io_uring_recvmsg_out msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_async_connect { + struct __kernel_sockaddr_storage address; +}; + +enum { + IORING_MSG_DATA = 0, + IORING_MSG_SEND_FD = 1, +}; + +struct io_msg { + struct file *file; + struct file *src_file; + struct callback_head tw; + u64 user_data; + u32 len; + u32 cmd; + u32 src_fd; + union { + u32 dst_fd; + u32 cqe_flags; + }; + u32 flags; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + u32 repeats; + struct list_head list; + struct io_kiocb *head; + struct io_kiocb *prev; +}; + +struct io_timeout_rem { + struct file *file; + u64 addr; + struct timespec64 ts; + u32 flags; + bool ltimeout; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; + u32 flags; +}; + +struct io_cancel_data { + struct io_ring_ctx *ctx; + union { + u64 data; + struct file *file; + }; + u8 opcode; + u32 flags; + int seq; +}; + +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK = 1, +}; + +typedef void io_wq_work_fn(struct io_wq_work *); + +typedef struct io_wq_work *free_work_fn(struct io_wq_work *); + +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __u64 data; +}; + +enum { + IOU_POLL_DONE = 0, + IOU_POLL_NO_ACTION = 1, + IOU_POLL_REMOVE_POLL_USE_RES = 2, + IOU_POLL_REISSUE = 3, +}; + +struct io_poll_update { + struct file *file; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int 
nr_entries; + int error; + bool owning; + __poll_t result_mask; +}; + +struct io_cancel { + struct file *file; + u64 addr; + u32 flags; + s32 fd; + u8 opcode; +}; + +struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u8 opcode; + __u8 pad[7]; + __u64 pad2[3]; +}; + +enum { + IOU_PBUF_RING_MMAP = 1, +}; + +struct io_provide_buf { + struct file *file; + __u64 addr; + __u32 len; + __u32 bgid; + __u32 nbufs; + __u16 bid; +}; + +struct io_buf_free { + struct hlist_node list; + void *mem; + size_t size; + int inuse; +}; + +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 flags; + __u64 resv[3]; +}; + +struct io_rsrc_update { + struct file *file; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct scm_fp_list; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + u32 consumed; +}; + +struct scm_fp_list { + short count; + short max; + struct user_struct *user; + struct file *fp[253]; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __u64 data; + __u64 tags; + __u32 nr; + __u32 resv2; +}; + +struct io_uring_rsrc_register { + __u32 nr; + __u32 flags; + __u64 resv2; + __u64 data; + __u64 tags; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u32 len; + rwf_t flags; +}; + +struct iov_iter_state { + size_t iov_offset; + size_t count; + unsigned long nr_segs; +}; + +struct io_rw_state { + struct iov_iter iter; + struct iov_iter_state iter_state; + struct iovec fast_iov[8]; +}; + +struct io_async_rw { + struct io_rw_state s; + const struct iovec *free_iovec; + size_t bytes_done; + struct wait_page_queue wpq; +}; + +struct io_waitid { + struct file *file; + int which; + pid_t upid; + int options; + atomic_t refs; + struct wait_queue_head *head; + struct siginfo __attribute__((btf_type_tag("user"))) * infop; + struct waitid_info info; +}; + +struct io_waitid_async { + struct io_kiocb *req; + struct wait_opts wo; +}; + +enum { + IO_WQ_BIT_EXIT = 0, +}; + +enum { + IO_WORKER_F_UP = 1, + IO_WORKER_F_RUNNING = 2, + IO_WORKER_F_FREE = 4, + IO_WORKER_F_BOUND = 8, +}; + +enum { + IO_ACCT_STALLED_BIT = 0, +}; + +enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, + IO_WQ_ACCT_NR = 2, +}; + +struct io_wq_acct { + unsigned int nr_workers; + unsigned int max_workers; + int index; + atomic_t nr_running; + raw_spinlock_t lock; + struct io_wq_work_list work_list; + unsigned long flags; +}; + +struct io_wq { + unsigned long state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct io_wq_hash *hash; + atomic_t worker_refs; + struct completion worker_done; + struct hlist_node cpuhp_node; + struct task_struct *task; + struct io_wq_acct acct[2]; + raw_spinlock_t lock; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq_work *hash_tail[64]; + cpumask_var_t cpu_mask; +}; + +struct io_worker { + refcount_t ref; + unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wq *wq; + struct io_wq_work *cur_work; + struct io_wq_work *next_work; + raw_spinlock_t lock; + struct completion ref_done; + unsigned long create_state; + struct callback_head create_work; + int create_index; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct online_data { + 
unsigned int cpu; + bool online; +}; + +struct io_futex { + struct file *file; + union { + u32 __attribute__((btf_type_tag("user"))) * uaddr; + struct futex_waitv __attribute__((btf_type_tag("user"))) * uwaitv; + }; + unsigned long futex_val; + unsigned long futex_mask; + unsigned long futexv_owned; + u32 futex_flags; + unsigned int futex_nr; + bool futexv_unqueued; +}; + +struct io_futex_data { + union { + struct futex_q q; + struct io_cache_entry cache; + }; + struct io_kiocb *req; +}; + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +struct sg_append_table { + struct sg_table sgt; + struct scatterlist *prv; + unsigned int total_nents; +}; + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +typedef size_t (*iov_ustep_f)(void __attribute__((btf_type_tag("user"))) *, size_t, + size_t, void *, void *); + +typedef size_t (*iov_step_f)(void *, size_t, size_t, void *, void *); + +union nested_table { + union nested_table __attribute__((btf_type_tag("rcu"))) * table; + struct rhash_lock_head __attribute__((btf_type_tag("rcu"))) * bucket; +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; + struct module *module; +}; + +struct genradix_node { + union { + struct genradix_node *children[512]; + u8 data[4096]; + }; +}; + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; + unsigned int nbits; +}; + +typedef __kernel_long_t __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +struct strarray { + char **array; + size_t n; +}; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +typedef struct { + __le64 b; + __le64 a; +} le128; + +enum blake2s_iv { + BLAKE2S_IV0 = 1779033703, + BLAKE2S_IV1 = 3144134277, + BLAKE2S_IV2 = 1013904242, + BLAKE2S_IV3 = 2773480762, + BLAKE2S_IV4 = 1359893119, + BLAKE2S_IV5 = 2600822924, + BLAKE2S_IV6 = 528734635, + BLAKE2S_IV7 = 1541459225, +}; + +typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); + +typedef int mpi_size_t; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef mpi_limb_t UWtype; + +typedef unsigned int UHWtype; + +struct mpi_ec_ctx; + +struct field_table { + const char *p; + void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); + void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); +}; + +struct barrett_ctx_s; + +typedef struct barrett_ctx_s *mpi_barrett_t; + +enum gcry_mpi_ec_models { + MPI_EC_WEIERSTRASS = 0, + MPI_EC_MONTGOMERY = 1, + MPI_EC_EDWARDS = 
2, +}; + +enum ecc_dialects { + ECC_DIALECT_STANDARD = 0, + ECC_DIALECT_ED25519 = 1, + ECC_DIALECT_SAFECURVE = 2, +}; + +struct gcry_mpi_point; + +typedef struct gcry_mpi_point *MPI_POINT; + +struct mpi_ec_ctx { + enum gcry_mpi_ec_models model; + enum ecc_dialects dialect; + int flags; + unsigned int nbits; + MPI p; + MPI a; + MPI b; + MPI_POINT G; + MPI n; + unsigned int h; + MPI_POINT Q; + MPI d; + const char *name; + struct { + struct { + unsigned int a_is_pminus3 : 1; + unsigned int two_inv_p : 1; + } valid; + int a_is_pminus3; + MPI two_inv_p; + mpi_barrett_t p_barrett; + MPI scratch[11]; + } t; + void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); + void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); +}; + +struct gcry_mpi_point { + MPI x; + MPI y; + MPI z; +}; + +enum gcry_mpi_constants { + MPI_C_ZERO = 0, + MPI_C_ONE = 1, + MPI_C_TWO = 2, + MPI_C_THREE = 3, + MPI_C_FOUR = 4, + MPI_C_EIGHT = 5, +}; + +enum gcry_mpi_format { + GCRYMPI_FMT_NONE = 0, + GCRYMPI_FMT_STD = 1, + GCRYMPI_FMT_PGP = 2, + GCRYMPI_FMT_SSH = 3, + GCRYMPI_FMT_HEX = 4, + GCRYMPI_FMT_USG = 5, + GCRYMPI_FMT_OPAQUE = 8, +}; + +struct barrett_ctx_s { + MPI m; + int m_copied; + int k; + MPI y; + MPI r1; + MPI r2; + MPI r3; +}; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +typedef long mpi_limb_signed_t; + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + DEVM_IOREMAP_WC = 2, + DEVM_IOREMAP_NP = 3, +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_IOV_RESOURCES = 7, + PCI_IOV_RESOURCE_END = 12, + PCI_BRIDGE_RESOURCES = 13, + PCI_BRIDGE_RESOURCE_END = 16, + PCI_NUM_RESOURCES = 17, + DEVICE_COUNT_RESOURCE = 17, +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct arch_io_reserve_memtype_wc_devres { + resource_size_t start; + resource_size_t size; +}; + +struct interval_tree_span_iter { + struct interval_tree_node *nodes[2]; + unsigned long first_index; + unsigned long last_index; + union { + unsigned long start_hole; + unsigned long start_used; + }; + union { + unsigned long last_hole; + unsigned long last_used; + }; + int is_hole; +}; + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + unsigned long sc_segments; + unsigned long dissimilarity; + } wrong_shortcut; +}; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + unsigned long start_addr; + unsigned long end_addr; + unsigned long bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + unsigned long offset; +}; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC 
= 8, + DICTID = 9, + DICT = 10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +typedef struct { + unsigned char op; + unsigned char bits; + unsigned short val; +} code; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + unsigned long check; + unsigned long total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + unsigned long hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code *next; + unsigned short lens[320]; + unsigned short work[288]; + code codes[2048]; +}; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +typedef unsigned int uInt; + +typedef unsigned short ush; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +struct deflate_state; + +typedef struct deflate_state deflate_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +typedef unsigned long ulg; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef unsigned char uch; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; + struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; + ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + 
char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +typedef struct tree_desc_s tree_desc; + +typedef enum { + ZSTD_reset_session_only = 1, + ZSTD_reset_parameters = 2, + ZSTD_reset_session_and_parameters = 3, +} ZSTD_ResetDirective; + +typedef enum { + ZSTD_c_compressionLevel = 100, + ZSTD_c_windowLog = 101, + ZSTD_c_hashLog = 102, + ZSTD_c_chainLog = 103, + ZSTD_c_searchLog = 104, + ZSTD_c_minMatch = 105, + ZSTD_c_targetLength = 106, + ZSTD_c_strategy = 107, + ZSTD_c_enableLongDistanceMatching = 160, + ZSTD_c_ldmHashLog = 161, + ZSTD_c_ldmMinMatch = 162, + ZSTD_c_ldmBucketSizeLog = 163, + ZSTD_c_ldmHashRateLog = 164, + ZSTD_c_contentSizeFlag = 200, + ZSTD_c_checksumFlag = 201, + ZSTD_c_dictIDFlag = 202, + ZSTD_c_nbWorkers = 400, + ZSTD_c_jobSize = 401, + ZSTD_c_overlapLog = 402, + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + ZSTD_c_experimentalParam6 = 1003, + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, +} ZSTD_cParameter; + +typedef ZSTD_CCtx ZSTD_CStream; + +typedef ZSTD_CStream zstd_cstream; + +typedef ZSTD_outBuffer zstd_out_buffer; + +typedef ZSTD_inBuffer zstd_in_buffer; + +struct seqDef_s { + U32 offBase; + U16 litLength; + U16 mlBase; +}; + +struct ZSTD_CDict_s { + const void *dictContent; + size_t dictContentSize; + ZSTD_dictContentType_e dictContentType; + U32 *entropyWorkspace; + ZSTD_cwksp workspace; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; + ZSTD_paramSwitch_e useRowMatchFinder; +}; + +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; + +typedef struct { + size_t bitContainer; + unsigned int bitPos; + char *startPtr; + char *ptr; + char *endPtr; +} BIT_CStream_t; + +typedef struct { + ptrdiff_t value; + const void *stateTable; + const void *symbolTT; + unsigned int stateLog; +} FSE_CState_t; + +typedef enum { + trustInput = 0, + checkMaxSymbolValue = 1, +} HIST_checkInput_e; + +typedef s16 int16_t; + +typedef int16_t S16; + +typedef struct { + FSE_CTable CTable[59]; + U32 scratchBuffer[41]; + unsigned int count[13]; + S16 norm[13]; +} HUF_CompressWeightsWksp; + +typedef struct { + HUF_CompressWeightsWksp wksp; + BYTE bitsToWeight[13]; + BYTE huffWeight[255]; +} HUF_WriteCTableWksp; + +struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +}; + +typedef struct nodeElt_s nodeElt; + +typedef nodeElt huffNodeTable[512]; + +typedef struct { + U16 base; + U16 curr; +} rankPos; + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[192]; +} HUF_buildCTable_wksp_tables; + +typedef struct { + unsigned int count[256]; + HUF_CElt CTable[257]; + union { + HUF_buildCTable_wksp_tables buildCTable_wksp; + HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[1024]; + } wksps; +} HUF_compress_tables_t; + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + BYTE *startPtr; + BYTE *ptr; + BYTE *endPtr; +} HUF_CStream_t; + +typedef enum { + HUF_singleStream = 0, + HUF_fourStreams = 1, +} HUF_nbStreams_e; + +typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t *, 
seqStore_t *, U32 *, + const void *, size_t); + +typedef uint8_t U8; + +enum { + ZSTDbss_compress = 0, + ZSTDbss_noCompress = 1, +}; + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +typedef struct { + size_t error; + int lowerBound; + int upperBound; +} ZSTD_bounds; + +typedef enum { + ZSTD_cpm_noAttachDict = 0, + ZSTD_cpm_attachDict = 1, + ZSTD_cpm_createCDict = 2, + ZSTD_cpm_unknown = 3, +} ZSTD_cParamMode_e; + +typedef enum { + ZSTD_e_continue = 0, + ZSTD_e_flush = 1, + ZSTD_e_end = 2, +} ZSTD_EndDirective; + +typedef struct { + U32 LLtype; + U32 Offtype; + U32 MLtype; + size_t size; + size_t lastCountSize; +} ZSTD_symbolEncodingTypeStats_t; + +typedef struct { + U32 *splitLocations; + size_t idx; +} seqStoreSplits; + +struct repcodes_s { + U32 rep[3]; +}; + +typedef struct repcodes_s repcodes_t; + +typedef enum { + ZSTD_dtlm_fast = 0, + ZSTD_dtlm_full = 1, +} ZSTD_dictTableLoadMethod_e; + +typedef enum { + ZSTD_dlm_byCopy = 0, + ZSTD_dlm_byRef = 1, +} ZSTD_dictLoadMethod_e; + +typedef struct { + U32 idx; + U32 posInSequence; + size_t posInSrc; +} ZSTD_sequencePosition; + +typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx *, ZSTD_sequencePosition *, + const ZSTD_Sequence *const, size_t, const void *, + size_t); + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1, +} ZSTD_defaultPolicy_e; + +typedef enum { + ZSTD_noDict = 0, + ZSTD_extDict = 1, + ZSTD_dictMatchState = 2, + ZSTD_dedicatedDictSearch = 3, +} ZSTD_dictMode_e; + +typedef enum { + ZSTD_no_overlap = 0, + ZSTD_overlap_src_before_dst = 1, +} ZSTD_overlap_e; + +typedef struct { + unsigned long long ingested; + unsigned long long consumed; + unsigned long long produced; + unsigned long long flushed; + unsigned int currentJobID; + unsigned int nbActiveWorkers; +} ZSTD_frameProgression; + +typedef enum { + ZSTDcrp_makeClean = 0, + ZSTDcrp_leaveDirty = 1, +} ZSTD_compResetPolicy_e; + +typedef enum { + ZSTDirp_continue = 0, + ZSTDirp_reset = 1, +} ZSTD_indexResetPolicy_e; + +typedef enum { + ZSTD_resetTarget_CDict = 0, + ZSTD_resetTarget_CCtx = 1, +} ZSTD_resetTarget_e; + +typedef struct { + S16 norm[53]; + U32 wksp[285]; +} ZSTD_BuildCTableWksp; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_sequenceLength; + +typedef enum { + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2, +} searchMethod_e; + +typedef U64 ZSTD_VecMask; + +typedef struct { + U64 rolling; + U64 stopMask; +} ldmRollingHashState_t; + +typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, + const BYTE *, const U32 *, const U32, const U32); + +typedef struct { + rawSeqStore_t seqStore; + U32 startPosInBlock; + U32 endPosInBlock; + U32 offset; +} ZSTD_optLdm_t; + +typedef ZSTD_DCtx ZSTD_DStream; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + 
ZSTD_error_workSpace_tooSmall = 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + ZSTD_error_dstBuffer_null = 74, + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_maxCode = 120, +} ZSTD_ErrorCode; + +typedef ZSTD_ErrorCode zstd_error_code; + +typedef ZSTD_DStream zstd_dstream; + +typedef ZSTD_frameHeader zstd_frame_header; + +typedef ZSTD_ErrorCode ERR_enum; + +typedef struct { + size_t compressedSize; + unsigned long long decompressedBound; +} ZSTD_frameSizeInfo; + +typedef struct { + U32 tableTime; + U32 decode256Time; +} algo_time_t; + +typedef struct { + BYTE nbBits; + BYTE byte; +} HUF_DEltX1; + +typedef struct { + U32 rankVal[13]; + U32 rankStart[13]; + U32 statsWksp[218]; + BYTE symbols[256]; + BYTE huffWeight[256]; +} HUF_ReadDTableX1_Workspace; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX2; + +typedef U32 rankValCol_t[13]; + +typedef struct { + BYTE symbol; +} sortedSymbol_t; + +typedef struct { + rankValCol_t rankVal[12]; + U32 rankStats[13]; + U32 rankStart0[15]; + sortedSymbol_t sortedSymbol[256]; + BYTE weightList[256]; + U32 calleeWksp[218]; +} HUF_ReadDTableX2_Workspace; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; + const char *limitPtr; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef enum { + not_streaming = 0, + is_streaming = 1, +} streaming_operation; + +typedef enum { + ZSTD_d_windowLogMax = 100, + ZSTD_d_experimentalParam1 = 1000, + ZSTD_d_experimentalParam2 = 1001, + ZSTD_d_experimentalParam3 = 1002, + ZSTD_d_experimentalParam4 = 1003, +} ZSTD_dParameter; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef enum { + ZSTD_lo_isRegularOffset = 0, + ZSTD_lo_isLongOffset = 1, +} ZSTD_longOffset_e; + +typedef struct { + U32 fastMode; + U32 tableLog; +} ZSTD_seqSymbol_header; + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol *table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[3]; +} seqState_t; + +typedef unsigned int FSE_DTable; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + +typedef struct { + short ncount[256]; + FSE_DTable dtable[0]; +} FSE_DecompressWksp; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +enum xz_ret { + XZ_OK = 0, + XZ_STREAM_END = 1, + XZ_UNSUPPORTED_CHECK = 2, + XZ_MEM_ERROR = 3, + XZ_MEMLIMIT_ERROR = 4, + XZ_FORMAT_ERROR = 5, + XZ_OPTIONS_ERROR = 6, + XZ_DATA_ERROR = 7, 
+ XZ_BUF_ERROR = 8, +}; + +typedef uint64_t vli_type; + +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +enum xz_check { + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10, +}; + +enum xz_mode { + XZ_SINGLE = 0, + XZ_PREALLOC = 1, + XZ_DYNALLOC = 2, +}; + +struct xz_dec_lzma2; + +struct xz_dec_bcj; + +struct xz_dec { + enum { + SEQ_STREAM_HEADER = 0, + SEQ_BLOCK_START = 1, + SEQ_BLOCK_HEADER = 2, + SEQ_BLOCK_UNCOMPRESS = 3, + SEQ_BLOCK_PADDING = 4, + SEQ_BLOCK_CHECK = 5, + SEQ_INDEX = 6, + SEQ_INDEX_PADDING = 7, + SEQ_INDEX_CRC32 = 8, + SEQ_STREAM_FOOTER = 9, + } sequence; + uint32_t pos; + vli_type vli; + size_t in_start; + size_t out_start; + uint32_t crc32; + enum xz_check check_type; + enum xz_mode mode; + bool allow_buf_error; + struct { + vli_type compressed; + vli_type uncompressed; + uint32_t size; + } block_header; + struct { + vli_type compressed; + vli_type uncompressed; + vli_type count; + struct xz_dec_hash hash; + } block; + struct { + enum { + SEQ_INDEX_COUNT = 0, + SEQ_INDEX_UNPADDED = 1, + SEQ_INDEX_UNCOMPRESSED = 2, + } sequence; + vli_type size; + vli_type count; + struct xz_dec_hash hash; + } index; + struct { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + struct xz_dec_lzma2 *lzma2; + struct xz_dec_bcj *bcj; + bool bcj_active; +}; + +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +enum lzma2_seq { + SEQ_CONTROL = 0, + SEQ_UNCOMPRESSED_1 = 1, + SEQ_UNCOMPRESSED_2 = 2, + SEQ_COMPRESSED_0 = 3, + SEQ_COMPRESSED_1 = 4, + SEQ_PROPERTIES = 5, + SEQ_LZMA_PREPARE = 6, + SEQ_LZMA_RUN = 7, + SEQ_COPY = 8, +}; + +enum lzma_state { + STATE_LIT_LIT = 0, + STATE_MATCH_LIT_LIT = 1, + STATE_REP_LIT_LIT = 2, + STATE_SHORTREP_LIT_LIT = 3, + STATE_MATCH_LIT = 4, + STATE_REP_LIT = 5, + STATE_SHORTREP_LIT = 6, + STATE_LIT_MATCH = 7, + STATE_LIT_LONGREP = 8, + STATE_LIT_SHORTREP = 9, + STATE_NONLIT_MATCH = 10, + STATE_NONLIT_REP = 11, +}; + +struct rc_dec { + uint32_t range; + uint32_t code; + uint32_t init_bytes_left; + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +struct dictionary { + uint8_t *buf; + size_t start; + size_t pos; + size_t full; + size_t limit; + size_t end; + uint32_t size; + uint32_t size_max; + uint32_t allocated; + enum xz_mode mode; +}; + +struct lzma2_dec { + enum lzma2_seq sequence; + enum lzma2_seq next_sequence; + uint32_t uncompressed; + uint32_t compressed; + bool need_dict_reset; + bool need_props; +}; + +struct lzma_len_dec { + uint16_t choice; + uint16_t choice2; + uint16_t low[128]; + uint16_t mid[128]; + uint16_t high[256]; +}; + +struct lzma_dec { + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + enum lzma_state state; + uint32_t len; + uint32_t lc; + uint32_t literal_pos_mask; + uint32_t pos_mask; + uint16_t is_match[192]; + uint16_t is_rep[12]; + uint16_t is_rep0[12]; + uint16_t is_rep1[12]; + uint16_t is_rep2[12]; + uint16_t is_rep0_long[192]; + uint16_t dist_slot[256]; + uint16_t dist_special[114]; + uint16_t dist_align[16]; + struct lzma_len_dec match_len_dec; + struct lzma_len_dec rep_len_dec; + uint16_t literal[12288]; +}; + +struct xz_dec_lzma2 { + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + struct { + uint32_t size; + uint8_t buf[63]; + } temp; +}; + +struct xz_dec_bcj { + enum { + BCJ_X86 = 4, + BCJ_POWERPC = 5, + BCJ_IA64 = 6, + BCJ_ARM = 7, + BCJ_ARMTHUMB = 8, + BCJ_SPARC = 9, + } 
type; + enum xz_ret ret; + bool single_call; + uint32_t pos; + uint32_t x86_prev_mask; + uint8_t *out; + size_t out_pos; + size_t out_size; + struct { + size_t filtered; + size_t size; + uint8_t buf[16]; + } temp; +}; + +struct ddebug_table { + struct list_head link; + struct list_head maps; + const char *mod_name; + unsigned int num_ddebugs; + struct _ddebug *ddebugs; +}; + +struct ddebug_class_param { + union { + unsigned long *bits; + unsigned int *lvl; + }; + char flags[8]; + const struct ddebug_class_map *map; +}; + +struct ddebug_query { + const char *filename; + const char *module; + const char *function; + const char *format; + const char *class_string; + unsigned int first_lineno; + unsigned int last_lineno; +}; + +struct flag_settings { + unsigned int flags; + unsigned int mask; +}; + +struct flagsbuf { + char buf[8]; +}; + +struct ddebug_iter { + struct ddebug_table *table; + int idx; +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE = 0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + NLA_BE16 = 18, + NLA_BE32 = 19, + NLA_SINT = 20, + NLA_UINT = 21, + __NLA_TYPE_MAX = 22, +}; + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 pool_index : 16; + u32 offset : 10; + u32 valid : 1; + u32 extra : 5; + }; +}; + +struct stack_record { + struct stack_record *next; + u32 hash; + u32 size; + union handle_parts handle; + unsigned long entries[0]; +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +struct font_desc { + int idx; + const char *name; + unsigned int width; + unsigned int height; + unsigned int charcount; + const void *data; + int pref; +}; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +struct node_groups { + unsigned int 
id; + union { + unsigned int ngroups; + unsigned int ncpus; + }; +}; + +enum acpi_subtable_type { + ACPI_SUBTABLE_COMMON = 0, + ACPI_SUBTABLE_HMAT = 1, + ACPI_SUBTABLE_PRMT = 2, + ACPI_SUBTABLE_CEDT = 3, +}; + +struct acpi_subtable_entry { + union acpi_subtable_headers *hdr; + enum acpi_subtable_type type; +}; + +typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *, void *, + const unsigned long); + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + acpi_tbl_entry_handler_arg handler_arg; + void *arg; + int count; +}; + +struct msr_info_completion { + struct msr_info msr; + struct completion done; +}; + +struct msr_regs_info { + u32 *regs; + int err; +}; + +typedef void (*btf_trace_read_msr)(void *, unsigned int, u64, int); + +typedef void (*btf_trace_write_msr)(void *, unsigned int, u64, int); + +typedef void (*btf_trace_rdpmc)(void *, unsigned int, u64, int); + +struct trace_event_raw_msr_trace_class { + struct trace_entry ent; + unsigned int msr; + u64 val; + int failed; + char __data[0]; +}; + +struct trace_event_data_offsets_msr_trace_class {}; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +typedef u64 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; + unsigned int flags; +}; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCIE_SPEED_64_0GT = 25, + PCI_SPEED_UNKNOWN = 255, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, + PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, +}; + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; 
+ struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + int domain_nr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + unsigned int ignore_reset_delay : 1; + unsigned int no_ext_tags : 1; + unsigned int no_inc_mrrs : 1; + unsigned int native_aer : 1; + unsigned int native_pcie_hotplug : 1; + unsigned int native_shpc_hotplug : 1; + unsigned int native_pme : 1; + unsigned int native_ltr : 1; + unsigned int native_dpc : 1; + unsigned int native_cxl_error : 1; + unsigned int preserve_config : 1; + unsigned int size_windows : 1; + unsigned int msi_domain : 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, + resource_size_t, resource_size_t, resource_size_t); + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + unsigned long private[0]; +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +struct acpi_device_status { + u32 present : 1; + u32 enabled : 1; + u32 show_in_ui : 1; + u32 functional : 1; + u32 battery_present : 1; + u32 reserved : 27; +}; + +struct acpi_device_flags { + u32 dynamic_status : 1; + u32 removable : 1; + u32 ejectable : 1; + u32 power_manageable : 1; + u32 match_driver : 1; + u32 initialized : 1; + u32 visited : 1; + u32 hotplug_notify : 1; + u32 is_dock_station : 1; + u32 of_compatible_ok : 1; + u32 coherent_dma : 1; + u32 cca_seen : 1; + u32 enumeration_by_parent : 1; + u32 honor_deps : 1; + u32 reserved : 18; +}; + +typedef char acpi_bus_id[8]; + +struct acpi_pnp_type { + u32 hardware_id : 1; + u32 bus_address : 1; + u32 platform_id : 1; + u32 backlight : 1; + u32 reserved : 28; +}; + +typedef u64 acpi_bus_address; + +typedef char acpi_device_name[40]; + +typedef char acpi_device_class[20]; + +struct acpi_device_pnp { + acpi_bus_id bus_id; + int instance_no; + struct acpi_pnp_type type; + acpi_bus_address bus_address; + char *unique_id; + struct list_head ids; + acpi_device_name device_name; + acpi_device_class device_class; + union acpi_object *str_obj; +}; + +struct acpi_device_power_flags { + u32 explicit_get : 1; + u32 power_resources : 1; + u32 inrush_current : 1; + u32 power_removed : 1; + u32 ignore_parent : 1; + u32 dsw_present : 1; + u32 reserved : 26; +}; + +struct acpi_device_power_state { + struct { + u8 valid : 1; + u8 explicit_set : 1; + u8 reserved : 6; + } flags; + int power; + int latency; + struct list_head resources; +}; + +struct acpi_device_power { + int state; + struct acpi_device_power_flags flags; + struct acpi_device_power_state states[5]; + u8 state_for_enumeration; +}; + +struct acpi_device_wakeup_flags { + u8 valid : 1; + u8 notifier_present : 1; +}; + +struct acpi_device_wakeup_context { + void (*func)(struct acpi_device_wakeup_context *); + struct device *dev; +}; + +struct acpi_device_wakeup { + acpi_handle gpe_device; + u64 gpe_number; + u64 sleep_state; + struct list_head resources; + struct acpi_device_wakeup_flags flags; + struct acpi_device_wakeup_context context; + struct wakeup_source *ws; + int prepare_count; + int enable_count; +}; + +struct acpi_device_perf_flags { + u8 reserved : 8; +}; + +struct acpi_device_perf_state; + +struct acpi_device_perf { + int state; + struct acpi_device_perf_flags flags; + int state_count; + struct acpi_device_perf_state *states; +}; + +struct acpi_device_dir { + struct proc_dir_entry *entry; +}; + 
+struct acpi_device_data { + const union acpi_object *pointer; + struct list_head properties; + const union acpi_object *of_compatible; + struct list_head subnodes; +}; + +struct acpi_scan_handler; + +struct acpi_hotplug_context; + +struct acpi_gpio_mapping; + +struct acpi_device { + u32 pld_crc; + int device_type; + acpi_handle handle; + struct fwnode_handle fwnode; + struct list_head wakeup_list; + struct list_head del_list; + struct acpi_device_status status; + struct acpi_device_flags flags; + struct acpi_device_pnp pnp; + struct acpi_device_power power; + struct acpi_device_wakeup wakeup; + struct acpi_device_perf performance; + struct acpi_device_dir dir; + struct acpi_device_data data; + struct acpi_scan_handler *handler; + struct acpi_hotplug_context *hp; + const struct acpi_gpio_mapping *driver_gpios; + void *driver_data; + struct device dev; + unsigned int physical_node_count; + unsigned int dep_unmet; + struct list_head physical_node_list; + struct mutex physical_node_lock; + void (*remove)(struct acpi_device *); +}; + +struct acpi_device_perf_state { + struct { + u8 valid : 1; + u8 reserved : 7; + } flags; + u8 power; + u8 performance; + int latency; +}; + +struct acpi_hotplug_profile { + struct kobject kobj; + int (*scan_dependent)(struct acpi_device *); + void (*notify_online)(struct acpi_device *); + bool enabled : 1; + bool demand_offline : 1; +}; + +struct acpi_scan_handler { + const struct acpi_device_id *ids; + struct list_head list_node; + bool (*match)(const char *, const struct acpi_device_id **); + int (*attach)(struct acpi_device *, const struct acpi_device_id *); + void (*detach)(struct acpi_device *); + void (*bind)(struct device *); + void (*unbind)(struct device *); + struct acpi_hotplug_profile hotplug; +}; + +struct acpi_hotplug_context { + struct acpi_device *self; + int (*notify)(struct acpi_device *, u32); + void (*uevent)(struct acpi_device *, u32); + void (*fixup)(struct acpi_device *); +}; + +struct acpi_gpio_params; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; + unsigned int quirks; +}; + +struct aer_stats { + u64 dev_cor_errs[16]; + u64 dev_fatal_errs[27]; + u64 dev_nonfatal_errs[27]; + u64 dev_total_cor_errs; + u64 dev_total_fatal_errs; + u64 dev_total_nonfatal_errs; + u64 rootport_total_cor_errs; + u64 rootport_total_fatal_errs; + u64 rootport_total_nonfatal_errs; +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +struct pci_reset_fn_method { + int (*reset_fn)(struct pci_dev *, bool); + char *name; +}; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(const struct bus_type *, char *); + ssize_t (*store)(const struct bus_type *, const char *, size_t); +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_devres { + unsigned int enabled : 1; + unsigned int pinned : 1; + unsigned int orig_intx : 1; + unsigned int restore_intx : 1; + unsigned int mwi : 1; + u32 
region_mask; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct pcie_device { + int irq; + struct pci_dev *port; + u32 service; + void *priv_data; + struct device device; +}; + +struct pcie_port_service_driver { + const char *name; + int (*probe)(struct pcie_device *); + void (*remove)(struct pcie_device *); + int (*suspend)(struct pcie_device *); + int (*resume_noirq)(struct pcie_device *); + int (*resume)(struct pcie_device *); + int (*runtime_suspend)(struct pcie_device *); + int (*runtime_resume)(struct pcie_device *); + int (*slot_reset)(struct pcie_device *); + int port_type; + u32 service; + struct device_driver driver; +}; + +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum enable_type { + undefined = -1, + user_disabled = 0, + auto_disabled = 1, + user_enabled = 2, + auto_enabled = 3, +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + unsigned long flags; +}; + +enum support_mode { + ALLOW_LEGACY = 0, + DENY_LEGACY = 1, +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +struct portdrv_service_data { + struct pcie_port_service_driver *drv; + struct device *dev; + u32 service; +}; + +typedef int (*pcie_callback_t)(struct pcie_device *); + +struct walk_rcec_data { + struct pci_dev *rcec; + int (*user_callback)(struct pci_dev *, void *); + void *user_data; +}; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support : 7; + u32 aspm_enabled : 7; + u32 aspm_capable : 7; + u32 aspm_default : 7; + int : 4; + u32 aspm_disable : 7; + u32 clkpm_capable : 1; + u32 clkpm_enabled : 1; + u32 clkpm_default : 1; + u32 clkpm_disable : 1; +}; + +struct aer_capability_regs; + +struct aer_recover_entry { + u8 bus; + u8 devfn; + u16 domain; + int severity; + struct aer_capability_regs *regs; +}; + +struct aer_header_log_regs { + unsigned int dw0; + unsigned int dw1; + unsigned int dw2; + unsigned int dw3; +}; + +struct aer_capability_regs { + u32 header; + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + struct aer_header_log_regs header_log; + u32 root_command; + u32 root_status; + u16 cor_err_source; + u16 uncor_err_source; +}; + +struct aer_err_source { + unsigned int status; + unsigned int id; +}; + +struct aer_err_info { + struct pci_dev *dev[5]; + int error_dev_num; + unsigned int id : 16; + unsigned int severity : 2; + unsigned int __pad1 : 5; + unsigned int multi_error_valid : 1; + unsigned int first_error : 5; + unsigned int __pad2 : 2; 
+ unsigned int tlp_header_valid : 1; + unsigned int status; + unsigned int mask; + struct aer_header_log_regs tlp; +}; + +struct aer_rpc { + struct pci_dev *rpd; + struct { + union { + struct __kfifo kfifo; + struct aer_err_source *type; + const struct aer_err_source *const_type; + char (*rectype)[0]; + struct aer_err_source *ptr; + const struct aer_err_source *ptr_const; + }; + struct aer_err_source buf[128]; + } aer_fifo; +}; + +struct pcie_pme_service_data { + spinlock_t lock; + struct pcie_device *srv; + struct work_struct work; + bool noirq; +}; + +struct pci_filp_private { + enum pci_mmap_state mmap_state; + int write_combine; +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +struct hpx_type0 { + u32 revision; + u8 cache_line_size; + u8 latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = -1, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +enum hpx_type3_cfg_loc { + HPX_CFG_PCICFG = 0, + HPX_CFG_PCIE_CAP = 1, + HPX_CFG_PCIE_CAP_EXT = 2, + HPX_CFG_VEND_CAP = 3, + HPX_CFG_DVSEC = 4, + HPX_CFG_MAX = 5, +}; + +enum hpx_type3_fn_type { + HPX_FN_NORMAL = 1, + HPX_FN_SRIOV_PHYS = 2, + HPX_FN_SRIOV_VIRT = 4, +}; + +typedef u64 acpi_size; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +struct hpx_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +struct hpx_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +struct hpx_type3 { + u16 device_type; + u16 function_type; + u16 config_space_location; + u16 pci_exp_cap_id; + u16 pci_exp_cap_ver; + u16 pci_exp_vendor_id; + u16 dvsec_id; + u16 dvsec_rev; + u16 match_offset; + u32 match_mask_and; + u32 match_value; + u16 reg_offset; + u32 reg_mask_and; + u32 reg_mask_or; +}; + +struct acpi_pci_root { + struct acpi_device *device; + struct pci_bus *bus; + u16 segment; + int bridge_type; + struct resource secondary; + u32 osc_support_set; + u32 osc_control_set; + u32 osc_ext_support_set; + u32 osc_ext_control_set; + phys_addr_t mcfg_addr; +}; + +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + +struct acs_on_id { + unsigned short vendor; + unsigned short device; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, bool); +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +enum pci_irq_reroute_variant { + INTEL_IRQ_REROUTE_VARIANT = 1, + MAX_IRQ_REROUTE_VARIANTS = 3, +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_CMBMSC = 80, 
+ NVME_REG_CRTO = 104, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CC_CRIME = 16777216, +}; + +enum { + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, +}; + +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + int hook_offset; +}; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct work_struct rm_work; + bool supplier_preactivated; +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +} __attribute__((packed)); + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +enum smbios_attr_enum { + SMBIOS_ATTR_NONE = 0, + SMBIOS_ATTR_LABEL_SHOW = 1, + SMBIOS_ATTR_INSTANCE_SHOW = 2, +}; + +enum dmi_device_type { + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER = 1, + DMI_DEV_TYPE_UNKNOWN = 2, + DMI_DEV_TYPE_VIDEO = 3, + DMI_DEV_TYPE_SCSI = 4, + DMI_DEV_TYPE_ETHERNET = 5, + DMI_DEV_TYPE_TOKENRING = 6, + DMI_DEV_TYPE_SOUND = 7, + DMI_DEV_TYPE_PATA = 8, + DMI_DEV_TYPE_SATA = 9, + DMI_DEV_TYPE_SAS = 10, + DMI_DEV_TYPE_IPMI = -1, + DMI_DEV_TYPE_OEM_STRING = -2, + DMI_DEV_TYPE_DEV_ONBOARD = -3, 
+ DMI_DEV_TYPE_DEV_SLOT = -4, +}; + +enum acpi_attr_enum { + ACPI_ATTR_LABEL_SHOW = 0, + ACPI_ATTR_INDEX_SHOW = 1, +}; + +struct dmi_device { + struct list_head list; + int type; + const char *name; + void *device_data; +}; + +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + +struct pci_p2pdma_whitelist_entry { + unsigned short vendor; + unsigned short device; + enum { + REQ_SAME_HOST_BRIDGE = 1, + } flags; +}; + +struct pci_p2pdma_pagemap { + struct pci_dev *provider; + u64 bus_offset; + struct dev_pagemap pgmap; +}; + +struct nvme_remap_dev { + struct pci_dev *dev; + struct pci_bus *bus; + struct pci_sysdata sysdata; + int irq_base; + bool bar_sizing; + struct resource ahci_resources[17]; + struct resource remapped_dev_mem[3]; + int num_remapped_devices; +}; + +struct aperture_range { + struct device *dev; + resource_size_t base; + resource_size_t size; + struct list_head lh; + void (*detach)(struct device *); +}; + +enum hdmi_infoframe_type { + HDMI_INFOFRAME_TYPE_VENDOR = 129, + HDMI_INFOFRAME_TYPE_AVI = 130, + HDMI_INFOFRAME_TYPE_SPD = 131, + HDMI_INFOFRAME_TYPE_AUDIO = 132, + HDMI_INFOFRAME_TYPE_DRM = 135, +}; + +enum hdmi_colorspace { + HDMI_COLORSPACE_RGB = 0, + HDMI_COLORSPACE_YUV422 = 1, + HDMI_COLORSPACE_YUV444 = 2, + HDMI_COLORSPACE_YUV420 = 3, + HDMI_COLORSPACE_RESERVED4 = 4, + HDMI_COLORSPACE_RESERVED5 = 5, + HDMI_COLORSPACE_RESERVED6 = 6, + HDMI_COLORSPACE_IDO_DEFINED = 7, +}; + +enum hdmi_scan_mode { + HDMI_SCAN_MODE_NONE = 0, + HDMI_SCAN_MODE_OVERSCAN = 1, + HDMI_SCAN_MODE_UNDERSCAN = 2, + HDMI_SCAN_MODE_RESERVED = 3, +}; + +enum hdmi_colorimetry { + HDMI_COLORIMETRY_NONE = 0, + HDMI_COLORIMETRY_ITU_601 = 1, + HDMI_COLORIMETRY_ITU_709 = 2, + HDMI_COLORIMETRY_EXTENDED = 3, +}; + +enum hdmi_picture_aspect { + HDMI_PICTURE_ASPECT_NONE = 0, + HDMI_PICTURE_ASPECT_4_3 = 1, + HDMI_PICTURE_ASPECT_16_9 = 2, + HDMI_PICTURE_ASPECT_64_27 = 3, + HDMI_PICTURE_ASPECT_256_135 = 4, + HDMI_PICTURE_ASPECT_RESERVED = 5, +}; + +enum hdmi_active_aspect { + HDMI_ACTIVE_ASPECT_16_9_TOP = 2, + HDMI_ACTIVE_ASPECT_14_9_TOP = 3, + HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, + HDMI_ACTIVE_ASPECT_PICTURE = 8, + HDMI_ACTIVE_ASPECT_4_3 = 9, + HDMI_ACTIVE_ASPECT_16_9 = 10, + HDMI_ACTIVE_ASPECT_14_9 = 11, + HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, + HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, + HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, +}; + +enum hdmi_extended_colorimetry { + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0, + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1, + HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2, + HDMI_EXTENDED_COLORIMETRY_OPYCC_601 = 3, + HDMI_EXTENDED_COLORIMETRY_OPRGB = 4, + HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5, + HDMI_EXTENDED_COLORIMETRY_BT2020 = 6, + HDMI_EXTENDED_COLORIMETRY_RESERVED = 7, +}; + +enum hdmi_quantization_range { + HDMI_QUANTIZATION_RANGE_DEFAULT = 0, + HDMI_QUANTIZATION_RANGE_LIMITED = 1, + HDMI_QUANTIZATION_RANGE_FULL = 2, + HDMI_QUANTIZATION_RANGE_RESERVED = 3, +}; + +enum hdmi_nups { + HDMI_NUPS_UNKNOWN = 0, + HDMI_NUPS_HORIZONTAL = 1, + HDMI_NUPS_VERTICAL = 2, + HDMI_NUPS_BOTH = 3, +}; + +enum hdmi_ycc_quantization_range { + HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0, + HDMI_YCC_QUANTIZATION_RANGE_FULL = 1, +}; + +enum hdmi_content_type { + HDMI_CONTENT_TYPE_GRAPHICS = 0, + HDMI_CONTENT_TYPE_PHOTO = 1, + HDMI_CONTENT_TYPE_CINEMA = 2, + HDMI_CONTENT_TYPE_GAME = 3, +}; + +enum hdmi_spd_sdi { + HDMI_SPD_SDI_UNKNOWN = 0, + HDMI_SPD_SDI_DSTB = 1, + HDMI_SPD_SDI_DVDP = 2, + HDMI_SPD_SDI_DVHS = 3, + HDMI_SPD_SDI_HDDVR = 4, + 
HDMI_SPD_SDI_DVC = 5, + HDMI_SPD_SDI_DSC = 6, + HDMI_SPD_SDI_VCD = 7, + HDMI_SPD_SDI_GAME = 8, + HDMI_SPD_SDI_PC = 9, + HDMI_SPD_SDI_BD = 10, + HDMI_SPD_SDI_SACD = 11, + HDMI_SPD_SDI_HDDVD = 12, + HDMI_SPD_SDI_PMP = 13, +}; + +enum hdmi_audio_coding_type { + HDMI_AUDIO_CODING_TYPE_STREAM = 0, + HDMI_AUDIO_CODING_TYPE_PCM = 1, + HDMI_AUDIO_CODING_TYPE_AC3 = 2, + HDMI_AUDIO_CODING_TYPE_MPEG1 = 3, + HDMI_AUDIO_CODING_TYPE_MP3 = 4, + HDMI_AUDIO_CODING_TYPE_MPEG2 = 5, + HDMI_AUDIO_CODING_TYPE_AAC_LC = 6, + HDMI_AUDIO_CODING_TYPE_DTS = 7, + HDMI_AUDIO_CODING_TYPE_ATRAC = 8, + HDMI_AUDIO_CODING_TYPE_DSD = 9, + HDMI_AUDIO_CODING_TYPE_EAC3 = 10, + HDMI_AUDIO_CODING_TYPE_DTS_HD = 11, + HDMI_AUDIO_CODING_TYPE_MLP = 12, + HDMI_AUDIO_CODING_TYPE_DST = 13, + HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14, + HDMI_AUDIO_CODING_TYPE_CXT = 15, +}; + +enum hdmi_audio_sample_size { + HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0, + HDMI_AUDIO_SAMPLE_SIZE_16 = 1, + HDMI_AUDIO_SAMPLE_SIZE_20 = 2, + HDMI_AUDIO_SAMPLE_SIZE_24 = 3, +}; + +enum hdmi_audio_sample_frequency { + HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0, + HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1, + HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2, + HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3, + HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4, + HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5, + HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6, + HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7, +}; + +enum hdmi_audio_coding_type_ext { + HDMI_AUDIO_CODING_TYPE_EXT_CT = 0, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6, + HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, +}; + +enum hdmi_3d_structure { + HDMI_3D_STRUCTURE_INVALID = -1, + HDMI_3D_STRUCTURE_FRAME_PACKING = 0, + HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1, + HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3, + HDMI_3D_STRUCTURE_L_DEPTH = 4, + HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5, + HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, +}; + +enum hdmi_eotf { + HDMI_EOTF_TRADITIONAL_GAMMA_SDR = 0, + HDMI_EOTF_TRADITIONAL_GAMMA_HDR = 1, + HDMI_EOTF_SMPTE_ST2084 = 2, + HDMI_EOTF_BT_2100_HLG = 3, +}; + +enum hdmi_metadata_type { + HDMI_STATIC_METADATA_TYPE1 = 0, +}; + +struct hdmi_any_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; +}; + +struct hdmi_avi_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + bool itc; + unsigned char pixel_repeat; + enum hdmi_colorspace colorspace; + enum hdmi_scan_mode scan_mode; + enum hdmi_colorimetry colorimetry; + enum hdmi_picture_aspect picture_aspect; + enum hdmi_active_aspect active_aspect; + enum hdmi_extended_colorimetry extended_colorimetry; + enum hdmi_quantization_range quantization_range; + enum hdmi_nups nups; + unsigned char video_code; + enum hdmi_ycc_quantization_range ycc_quantization_range; + enum hdmi_content_type content_type; + unsigned short top_bar; + unsigned short bottom_bar; + unsigned short left_bar; + unsigned short right_bar; +}; + +struct hdmi_spd_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + char vendor[8]; + char product[16]; + enum hdmi_spd_sdi sdi; +}; + +struct hdmi_audio_infoframe { + 
enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned char channels; + enum hdmi_audio_coding_type coding_type; + enum hdmi_audio_sample_size sample_size; + enum hdmi_audio_sample_frequency sample_frequency; + enum hdmi_audio_coding_type_ext coding_type_ext; + unsigned char channel_allocation; + unsigned char level_shift_value; + bool downmix_inhibit; +}; + +struct hdmi_vendor_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + u8 vic; + enum hdmi_3d_structure s3d_struct; + unsigned int s3d_ext_data; +}; + +struct hdmi_drm_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_eotf eotf; + enum hdmi_metadata_type metadata_type; + struct { + u16 x; + u16 y; + } display_primaries[3]; + struct { + u16 x; + u16 y; + } white_point; + u16 max_display_mastering_luminance; + u16 min_display_mastering_luminance; + u16 max_cll; + u16 max_fall; +}; + +union hdmi_vendor_any_infoframe { + struct { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + } any; + struct hdmi_vendor_infoframe hdmi; +}; + +struct dp_sdp_header { + u8 HB0; + u8 HB1; + u8 HB2; + u8 HB3; +}; + +struct dp_sdp { + struct dp_sdp_header sdp_header; + u8 db[32]; +}; + +union hdmi_infoframe { + struct hdmi_any_infoframe any; + struct hdmi_avi_infoframe avi; + struct hdmi_spd_infoframe spd; + union hdmi_vendor_any_infoframe vendor; + struct hdmi_audio_infoframe audio; + struct hdmi_drm_infoframe drm; +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_data; + +struct console_font; + +struct consw { + struct module *owner; + const char *(*con_startup)(); + void (*con_init)(struct vc_data *, int); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, int, int, int, int); + void (*con_putc)(struct vc_data *, int, int, int); + void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); + void (*con_cursor)(struct vc_data *, int); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, + unsigned int); + int (*con_switch)(struct vc_data *); + int (*con_blank)(struct vc_data *, int, int); + int (*con_font_set)(struct vc_data *, struct console_font *, unsigned int, + unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *, unsigned int); + int (*con_font_default)(struct vc_data *, struct console_font *, char *); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + int (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void (*con_invert_region)(struct vc_data *, u16 *, int); + u16 *(*con_screen_pos)(const struct vc_data *, int); + unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); + void (*con_flush_scrollback)(struct vc_data *); + int (*con_debug_enter)(struct vc_data *); + int (*con_debug_leave)(struct vc_data *); +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset : 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; 
+ +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_mode { + char mode; + char waitv; + short relsig; + short acqsig; + short frsig; +}; + +struct uni_pagedict; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + unsigned short vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + unsigned int vc_cell_height; + unsigned long vc_origin; + unsigned long vc_scr_end; + unsigned long vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + unsigned short *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + unsigned short vc_complement_mask; + unsigned short vc_s_complement_mask; + unsigned long vc_pos; + unsigned short vc_hi_font_mask; + struct console_font vc_font; + unsigned short vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl : 1; + unsigned int vc_toggle_meta : 1; + unsigned int vc_decscnm : 1; + unsigned int vc_decom : 1; + unsigned int vc_decawm : 1; + unsigned int vc_deccm : 1; + unsigned int vc_decim : 1; + unsigned int vc_priv : 3; + unsigned int vc_need_wrap : 1; + unsigned int vc_can_do_color : 1; + unsigned int vc_report_mouse : 2; + unsigned char vc_utf : 1; + unsigned char vc_utf_count; + int vc_utf_char; + unsigned long vc_tab_stop[4]; + unsigned char vc_palette[48]; + unsigned short *vc_translate; + unsigned int vc_resize_user; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + unsigned short vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedict *uni_pagedict; + struct uni_pagedict **uni_pagedict_loc; + u32 **vc_uni_lines; +}; + +struct vgastate { + void *vgabase; + unsigned long membase; + __u32 memsize; + __u32 flags; + __u32 depth; + __u32 num_attr; + __u32 num_crtc; + __u32 num_gfx; + __u32 num_seq; + void *vidstate; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_fix_screeninfo { + char id[16]; + unsigned long smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 ypanstep; + __u16 ywrapstep; + __u32 line_length; + unsigned long mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_videomode; + +struct fb_monspecs { + struct fb_chroma chroma; + struct 
fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf : 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_info; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 scan_align; + u32 access_align; + u32 flags; + u32 blit_x; + u32 blit_y; + void (*writeio)(struct fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_deferred_io_pageref; + +struct fb_deferred_io; + +struct fb_ops; + +struct fb_info { + refcount_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct delayed_work deferred_work; + unsigned long npagerefs; + struct fb_deferred_io_pageref *pagerefs; + struct fb_deferred_io *fbdefio; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + union { + char *screen_base; + char *screen_buffer; + }; + unsigned long screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + bool skip_vt_switch; +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_deferred_io_pageref { + struct page *page; + unsigned long offset; + struct list_head list; +}; + +struct fb_deferred_io { + unsigned long delay; + bool sort_pagereflist; + int open_count; + struct mutex lock; + struct list_head pagereflist; + void (*deferred_io)(struct fb_info *, struct list_head *); +}; + +struct fb_fillrect; + +struct fb_copyarea; + +struct fb_image; + +struct fb_cursor; + +struct fb_blit_caps; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char __attribute__((btf_type_tag("user"))) *, + size_t, loff_t *); + ssize_t (*fb_write)(struct fb_info *, + const char __attribute__((btf_type_tag("user"))) *, size_t, + loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, + unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, unsigned long); + int (*fb_compat_ioctl)(struct fb_info 
*, unsigned int, unsigned long); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, + struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 __attribute__((btf_type_tag("user"))) * red; + __u16 __attribute__((btf_type_tag("user"))) * green; + __u16 __attribute__((btf_type_tag("user"))) * blue; + __u16 __attribute__((btf_type_tag("user"))) * transp; +}; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +struct fb_cmap32 { + u32 start; + u32 len; + compat_caddr_t red; + compat_caddr_t green; + compat_caddr_t blue; + compat_caddr_t transp; +}; + +struct fb_fix_screeninfo32 { + char id[16]; + compat_caddr_t smem_start; + u32 smem_len; + u32 type; + u32 type_aux; + u32 visual; + u16 xpanstep; + u16 ypanstep; + u16 ywrapstep; + u32 line_length; + compat_caddr_t mmio_start; + u32 mmio_len; + u32 accel; + u16 reserved[3]; +}; + +typedef unsigned short u_short; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short inverse; + short yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; + u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +enum { + FBCON_LOGO_CANSHOW = -1, + FBCON_LOGO_DRAW = -2, + FBCON_LOGO_DONTSHOW = -3, +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const unsigned short *, int, + int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, int, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + struct delayed_work cursor_work; + struct fb_cursor 
cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + bool initialized; + int rotate; + int cur_rotate; + char *cursor_data; + u8 *fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +struct vesafb_par { + u32 pseudo_palette[256]; + resource_size_t base; + resource_size_t size; + int wc_cookie; + struct resource *region; +}; + +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3, +}; + +struct efifb_par { + u32 pseudo_palette[16]; + resource_size_t base; + resource_size_t size; +}; + +struct bmp_file_header { + u16 id; + u32 file_size; + u32 reserved; + u32 bitmap_offset; +} __attribute__((packed)); + +struct bmp_dib_header { + u32 dib_header_size; + s32 width; + s32 height; + u16 planes; + u16 bpp; + u32 compression; + u32 bitmap_size; + u32 horz_resolution; + u32 vert_resolution; + u32 colors_used; + u32 colors_important; +}; + +union acpi_name_union { + u32 integer; + char ascii[4]; +}; + +typedef u16 acpi_owner_id; + +struct acpi_table_desc { + acpi_physical_address address; + struct acpi_table_header *pointer; + u32 length; + union acpi_name_union signature; + acpi_owner_id owner_id; + u8 flags; + u16 validation_count; +}; + +enum acpi_cedt_type { + ACPI_CEDT_TYPE_CHBS = 0, + ACPI_CEDT_TYPE_CFMWS = 1, + ACPI_CEDT_TYPE_CXIMS = 2, + ACPI_CEDT_TYPE_RDPAS = 3, + ACPI_CEDT_TYPE_RESERVED = 4, +}; + +struct acpi_madt_io_sapic { + struct acpi_subtable_header header; + u8 id; + u8 reserved; + u32 global_irq_base; + u64 address; +}; + +struct acpi_madt_interrupt_source { + struct acpi_subtable_header header; + u16 inti_flags; + u8 type; + u8 id; + u8 eid; + u8 io_sapic_vector; + u32 global_irq; + u32 flags; +}; + +struct acpi_madt_generic_interrupt { + struct acpi_subtable_header header; + u16 reserved; + u32 cpu_interface_number; + u32 uid; + u32 flags; + u32 parking_version; + u32 performance_interrupt; + u64 parked_address; + u64 base_address; + u64 gicv_base_address; + u64 gich_base_address; + u32 vgic_interrupt; + u64 gicr_base_address; + u64 arm_mpidr; + u8 efficiency_class; + u8 reserved2[1]; + u16 spe_interrupt; + u16 trbe_interrupt; +} __attribute__((packed)); + +struct acpi_madt_generic_distributor { + struct acpi_subtable_header header; + u16 reserved; + u32 gic_id; + u64 base_address; + u32 global_irq_base; + u8 version; + u8 reserved2[3]; +}; + +struct acpi_madt_core_pic { + struct acpi_subtable_header header; + u8 version; + u32 processor_id; + u32 core_id; + u32 flags; +} __attribute__((packed)); + +struct acpi_madt_rintc { + struct acpi_subtable_header header; + u8 version; + u8 reserved; + u32 flags; + u64 hart_id; + u32 uid; + u32 ext_intc_id; + u64 imsic_addr; + u32 imsic_size; +} __attribute__((packed)); + +enum acpi_predicate { + all_versions = 0, + less_than_or_equal = 1, + equal = 2, + greater_than_or_equal = 3, +}; + +struct acpi_platform_list { + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; + char *table; + enum acpi_predicate pred; + char *reason; + u32 data; +}; + +struct acpi_osi_config { + u8 default_disabling; + unsigned int linux_enable : 1; + unsigned int linux_dmi : 1; + unsigned int linux_cmdline : 1; + unsigned int 
darwin_enable : 1; + unsigned int darwin_dmi : 1; + unsigned int darwin_cmdline : 1; +}; + +struct acpi_osi_entry { + char string[64]; + bool enable; +}; + +typedef u32 (*acpi_interface_handler)(acpi_string, u32); + +typedef u32 (*acpi_osd_handler)(void *); + +struct acpi_ioremap { + struct list_head list; + void *virt; + acpi_physical_address phys; + acpi_size size; + union { + unsigned long refcount; + struct rcu_work rwork; + } track; +}; + +typedef void (*acpi_osd_exec_callback)(void *); + +struct acpi_os_dpc { + acpi_osd_exec_callback function; + void *context; + struct work_struct work; +}; + +struct acpi_hp_work { + struct work_struct work; + struct acpi_device *adev; + u32 src; +}; + +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_predefined_names { + const char *name; + u8 type; + char *val; +}; + +struct acpi_pci_id { + u16 segment; + u16 bus; + u16 device; + u16 function; +}; + +typedef enum { + OSL_GLOBAL_LOCK_HANDLER = 0, + OSL_NOTIFY_HANDLER = 1, + OSL_GPE_HANDLER = 2, + OSL_DEBUGGER_MAIN_THREAD = 3, + OSL_DEBUGGER_EXEC_THREAD = 4, + OSL_EC_POLL_HANDLER = 5, + OSL_EC_BURST_HANDLER = 6, +} acpi_execute_type; + +struct acpi_device_bus_id { + const char *bus_id; + struct ida instance_ida; + struct list_head node; +}; + +struct acpi_pld_info { + u8 revision; + u8 ignore_color; + u8 red; + u8 green; + u8 blue; + u16 width; + u16 height; + u8 user_visible; + u8 dock; + u8 lid; + u8 panel; + u8 vertical_position; + u8 horizontal_position; + u8 shape; + u8 group_orientation; + u8 group_token; + u8 group_position; + u8 bay; + u8 ejectable; + u8 ospm_eject_required; + u8 cabinet_number; + u8 card_cage_number; + u8 reference; + u8 rotation; + u8 order; + u8 reserved; + u16 vertical_offset; + u16 horizontal_offset; +}; + +struct acpi_handle_list { + u32 count; + acpi_handle *handles; +}; + +struct acpi_dev_match_info { + struct acpi_device_id hid[2]; + const char *uid; + s64 hrv; +}; + +struct nvs_region { + __u64 phys_start; + __u64 size; + struct list_head node; +}; + +struct nvs_page { + unsigned long phys_start; + unsigned int size; + void *kaddr; + void *data; + bool unmap; + struct list_head node; +}; + +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *); + void *context; +}; + +struct acpi_table_facs { + char signature[4]; + u32 length; + u32 hardware_signature; + u32 firmware_waking_vector; + u32 global_lock; + u32 flags; + u64 xfirmware_waking_vector; + u8 version; + u8 reserved[3]; + u32 ospm_flags; + u8 reserved1[24]; +}; + +typedef u32 acpi_event_status; + +struct acpi_gpe_xrupt_info; + +struct acpi_gpe_block_info; + +typedef acpi_status (*acpi_gpe_callback)(struct acpi_gpe_xrupt_info *, + struct acpi_gpe_block_info *, void *); + +struct acpi_gpe_xrupt_info { + struct acpi_gpe_xrupt_info *previous; + struct acpi_gpe_xrupt_info *next; + struct acpi_gpe_block_info *gpe_block_list_head; + u32 interrupt_number; +}; + +struct acpi_namespace_node; + +struct acpi_gpe_register_info; + +struct acpi_gpe_event_info; + +struct acpi_gpe_block_info { + struct acpi_namespace_node *node; + struct acpi_gpe_block_info *previous; + struct acpi_gpe_block_info *next; + struct acpi_gpe_xrupt_info *xrupt_block; + struct acpi_gpe_register_info *register_info; + struct acpi_gpe_event_info *event_info; + u64 address; + u32 register_count; + u16 gpe_count; + u16 block_base_number; + u8 space_id; + u8 initialized; +}; + +union acpi_operand_object; + +struct acpi_namespace_node { + union 
acpi_operand_object *object; + u8 descriptor_type; + u8 type; + u16 flags; + union acpi_name_union name; + struct acpi_namespace_node *parent; + struct acpi_namespace_node *child; + struct acpi_namespace_node *peer; + acpi_owner_id owner_id; +}; + +struct acpi_object_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; +}; + +struct acpi_object_integer { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 fill[3]; + u64 value; +}; + +struct acpi_object_string { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + char *pointer; + u32 length; +}; + +struct acpi_object_buffer { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 *pointer; + u32 length; + u32 aml_length; + u8 *aml_start; + struct acpi_namespace_node *node; +}; + +struct acpi_object_package { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *node; + union acpi_operand_object **elements; + u8 *aml_start; + u32 aml_length; + u32 count; +}; + +struct acpi_object_event { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + void *os_semaphore; +}; + +struct acpi_walk_state; + +typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *); + +struct acpi_object_method { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 info_flags; + u8 param_count; + u8 sync_level; + union acpi_operand_object *mutex; + union acpi_operand_object *node; + u8 *aml_start; + union { + acpi_internal_method implementation; + union acpi_operand_object *handler; + } dispatch; + u32 aml_length; + acpi_owner_id owner_id; + u8 thread_count; +}; + +struct acpi_thread_state; + +struct acpi_object_mutex { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 sync_level; + u16 acquisition_depth; + void *os_mutex; + u64 thread_id; + struct acpi_thread_state *owner_thread; + union acpi_operand_object *prev; + union acpi_operand_object *next; + struct acpi_namespace_node *node; + u8 original_sync_level; +}; + +struct acpi_object_region { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 space_id; + struct acpi_namespace_node *node; + union acpi_operand_object *handler; + union acpi_operand_object *next; + acpi_physical_address address; + u32 length; + void *pointer; +}; + +struct acpi_object_notify_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; +}; + +struct acpi_object_device { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + struct acpi_gpe_block_info *gpe_block; +}; + +struct acpi_object_power_resource { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + u32 system_level; + u32 resource_order; +}; + +struct acpi_object_processor { + union 
acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 proc_id; + u8 length; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + acpi_io_address address; +}; + +struct acpi_object_thermal_zone { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; +}; + +struct acpi_object_field_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *region_obj; +}; + +struct acpi_object_region_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + u16 resource_length; + union acpi_operand_object *region_obj; + u8 *resource_buffer; + u16 pin_number_index; + u8 *internal_pcc_buffer; +}; + +struct acpi_object_buffer_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + u8 is_create_field; + union acpi_operand_object *buffer_obj; +}; + +struct acpi_object_bank_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *region_obj; + union acpi_operand_object *bank_obj; +}; + +struct acpi_object_index_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *index_obj; + union acpi_operand_object *data_obj; +}; + +typedef void (*acpi_notify_handler)(acpi_handle, u32, void *); + +struct acpi_object_notify_handler { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *node; + u32 handler_type; + acpi_notify_handler handler; + void *context; + union acpi_operand_object *next[2]; +}; + +typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, + void *, void *); + +typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **); + +struct acpi_object_addr_handler { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 space_id; + u8 handler_flags; + acpi_adr_space_handler handler; + struct acpi_namespace_node *node; + void *context; + void *context_mutex; + acpi_adr_space_setup setup; + union acpi_operand_object *region_list; + union 
acpi_operand_object *next; +}; + +struct acpi_object_reference { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 class; + u8 target_type; + u8 resolved; + void *object; + struct acpi_namespace_node *node; + union acpi_operand_object **where; + u8 *index_pointer; + u8 *aml; + u32 value; +}; + +struct acpi_object_extra { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *method_REG; + struct acpi_namespace_node *scope_node; + void *region_context; + u8 *aml_start; + u32 aml_length; +}; + +typedef void (*acpi_object_handler)(acpi_handle, void *); + +struct acpi_object_data { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + acpi_object_handler handler; + void *pointer; +}; + +struct acpi_object_cache_list { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *next; +}; + +union acpi_operand_object { + struct acpi_object_common common; + struct acpi_object_integer integer; + struct acpi_object_string string; + struct acpi_object_buffer buffer; + struct acpi_object_package package; + struct acpi_object_event event; + struct acpi_object_method method; + struct acpi_object_mutex mutex; + struct acpi_object_region region; + struct acpi_object_notify_common common_notify; + struct acpi_object_device device; + struct acpi_object_power_resource power_resource; + struct acpi_object_processor processor; + struct acpi_object_thermal_zone thermal_zone; + struct acpi_object_field_common common_field; + struct acpi_object_region_field field; + struct acpi_object_buffer_field buffer_field; + struct acpi_object_bank_field bank_field; + struct acpi_object_index_field index_field; + struct acpi_object_notify_handler notify; + struct acpi_object_addr_handler address_space; + struct acpi_object_reference reference; + struct acpi_object_extra extra; + struct acpi_object_data data; + struct acpi_object_cache_list cache; + struct acpi_namespace_node node; +}; + +union acpi_parse_object; + +union acpi_generic_state; + +struct acpi_parse_state { + u8 *aml_start; + u8 *aml; + u8 *aml_end; + u8 *pkg_start; + u8 *pkg_end; + union acpi_parse_object *start_op; + struct acpi_namespace_node *start_node; + union acpi_generic_state *scope; + union acpi_parse_object *start_scope; + u32 aml_size; +}; + +typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, + union acpi_parse_object **); + +typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *); + +struct acpi_opcode_info; + +struct acpi_walk_state { + struct acpi_walk_state *next; + u8 descriptor_type; + u8 walk_type; + u16 opcode; + u8 next_op_info; + u8 num_operands; + u8 operand_index; + acpi_owner_id owner_id; + u8 last_predicate; + u8 current_result; + u8 return_used; + u8 scope_depth; + u8 pass_number; + u8 namespace_override; + u8 result_size; + u8 result_count; + u8 *aml; + u32 arg_types; + u32 method_breakpoint; + u32 user_breakpoint; + u32 parse_flags; + struct acpi_parse_state parser_state; + u32 prev_arg_types; + u32 arg_count; + u16 method_nesting_depth; + u8 method_is_nested; + struct acpi_namespace_node arguments[7]; + struct acpi_namespace_node local_variables[8]; + union acpi_operand_object *operands[9]; + union acpi_operand_object **params; + u8 *aml_last_while; + union acpi_operand_object **caller_return_desc; + union 
acpi_generic_state *control_state; + struct acpi_namespace_node *deferred_node; + union acpi_operand_object *implicit_return_obj; + struct acpi_namespace_node *method_call_node; + union acpi_parse_object *method_call_op; + union acpi_operand_object *method_desc; + struct acpi_namespace_node *method_node; + char *method_pathname; + union acpi_parse_object *op; + const struct acpi_opcode_info *op_info; + union acpi_parse_object *origin; + union acpi_operand_object *result_obj; + union acpi_generic_state *results; + union acpi_operand_object *return_desc; + union acpi_generic_state *scope_info; + union acpi_parse_object *prev_op; + union acpi_parse_object *next_op; + struct acpi_thread_state *thread; + acpi_parse_downwards descending_callback; + acpi_parse_upwards ascending_callback; +}; + +union acpi_parse_value { + u64 integer; + u32 size; + char *string; + u8 *buffer; + char *name; + union acpi_parse_object *arg; +}; + +struct acpi_parse_obj_common { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; +}; + +struct acpi_parse_obj_named { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + char *path; + u8 *data; + u32 length; + u32 name; +}; + +struct acpi_parse_obj_asl { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + union acpi_parse_object *child; + union acpi_parse_object *parent_method; + char *filename; + u8 file_changed; + char *parent_filename; + char *external_name; + char *namepath; + char name_seg[4]; + u32 extra_value; + u32 column; + u32 line_number; + u32 logical_line_number; + u32 logical_byte_offset; + u32 end_line; + u32 end_logical_line; + u32 acpi_btype; + u32 aml_length; + u32 aml_subtree_length; + u32 final_aml_length; + u32 final_aml_offset; + u32 compile_flags; + u16 parse_opcode; + u8 aml_opcode_length; + u8 aml_pkg_len_bytes; + u8 extra; + char parse_op_name[20]; +}; + +union acpi_parse_object { + struct acpi_parse_obj_common common; + struct acpi_parse_obj_named named; + struct acpi_parse_obj_asl asl; +}; + +struct acpi_common_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; +}; + +struct acpi_control_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u16 opcode; + union acpi_parse_object *predicate_op; + u8 *aml_predicate_start; + u8 *package_end; + u64 loop_timeout; +}; + +struct acpi_update_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + union acpi_operand_object *object; +}; + +struct acpi_scope_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + struct acpi_namespace_node *node; +}; + +struct acpi_pscope_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u32 arg_count; + union acpi_parse_object *op; + u8 *arg_end; + u8 *pkg_end; + u32 arg_list; +}; + +struct acpi_pkg_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u32 index; + union acpi_operand_object *source_object; + union acpi_operand_object *dest_object; + struct acpi_walk_state *walk_state; + void 
*this_target_obj; + u32 num_packages; +}; + +struct acpi_thread_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u8 current_sync_level; + struct acpi_walk_state *walk_state_list; + union acpi_operand_object *acquired_mutex_list; + u64 thread_id; +}; + +struct acpi_result_values { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + union acpi_operand_object *obj_desc[8]; +}; + +struct acpi_global_notify_handler; + +struct acpi_notify_info { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u8 handler_list_id; + struct acpi_namespace_node *node; + union acpi_operand_object *handler_list_head; + struct acpi_global_notify_handler *global; +}; + +union acpi_generic_state { + struct acpi_common_state common; + struct acpi_control_state control; + struct acpi_update_state update; + struct acpi_scope_state scope; + struct acpi_pscope_state parse_scope; + struct acpi_pkg_state pkg; + struct acpi_thread_state thread; + struct acpi_result_values results; + struct acpi_notify_info notify; +}; + +struct acpi_global_notify_handler { + acpi_notify_handler handler; + void *context; +}; + +struct acpi_opcode_info { + u32 parse_args; + u32 runtime_args; + u16 flags; + u8 object_type; + u8 class; + u8 type; +}; + +struct acpi_gpe_address { + u8 space_id; + u64 address; +}; + +struct acpi_gpe_register_info { + struct acpi_gpe_address status_address; + struct acpi_gpe_address enable_address; + u16 base_gpe_number; + u8 enable_for_wake; + u8 enable_for_run; + u8 mask_for_run; + u8 enable_mask; +}; + +struct acpi_gpe_handler_info; + +struct acpi_gpe_notify_info; + +union acpi_gpe_dispatch_info { + struct acpi_namespace_node *method_node; + struct acpi_gpe_handler_info *handler; + struct acpi_gpe_notify_info *notify_list; +}; + +struct acpi_gpe_event_info { + union acpi_gpe_dispatch_info dispatch; + struct acpi_gpe_register_info *register_info; + u8 flags; + u8 gpe_number; + u8 runtime_count; + u8 disable_for_dispatch; +}; + +typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *); + +struct acpi_gpe_handler_info { + acpi_gpe_handler address; + void *context; + struct acpi_namespace_node *method_node; + u8 original_flags; + u8 originally_enabled; +}; + +struct acpi_gpe_notify_info { + struct acpi_namespace_node *device_node; + struct acpi_gpe_notify_info *next; +}; + +struct acpi_data_node; + +struct acpi_data_node_attr { + struct attribute attr; + ssize_t (*show)(struct acpi_data_node *, char *); + ssize_t (*store)(struct acpi_data_node *, const char *, size_t); +}; + +struct acpi_data_node { + const char *name; + acpi_handle handle; + struct fwnode_handle fwnode; + struct fwnode_handle *parent; + struct acpi_device_data data; + struct list_head sibling; + struct kobject kobj; + struct completion kobj_done; +}; + +struct acpi_hardware_id { + struct list_head list; + const char *id; +}; + +struct acpi_device_physical_node { + unsigned int node_id; + struct list_head node; + struct device *dev; + bool put_online : 1; +}; + +typedef int (*acpi_op_add)(struct acpi_device *); + +typedef void (*acpi_op_remove)(struct acpi_device *); + +typedef void (*acpi_op_notify)(struct acpi_device *, u32); + +struct acpi_device_ops { + acpi_op_add add; + acpi_op_remove remove; + acpi_op_notify notify; +}; + +struct acpi_driver { + char name[80]; + char class[80]; + const struct acpi_device_id *ids; + unsigned int flags; + struct acpi_device_ops ops; + struct device_driver drv; + struct module *owner; +}; + +struct acpi_osc_context { + char 
*uuid_str; + int rev; + struct acpi_buffer cap; + struct acpi_buffer ret; +}; + +typedef acpi_status (*acpi_table_handler)(u32, void *, void *); + +struct acpi_dev_walk_context { + int (*fn)(struct acpi_device *, void *); + void *data; +}; + +struct acpi_bus_type { + struct list_head list; + const char *name; + bool (*match)(struct device *); + struct acpi_device *(*find_companion)(struct device *); + void (*setup)(struct device *); +}; + +struct find_child_walk_data { + struct acpi_device *adev; + u64 address; + int score; + bool check_sta; + bool check_children; +}; + +struct acpi_probe_entry; + +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, + struct acpi_probe_entry *); + +struct acpi_probe_entry { + __u8 id[5]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE = 1, +}; + +enum acpi_bus_device_type { + ACPI_BUS_TYPE_DEVICE = 0, + ACPI_BUS_TYPE_POWER = 1, + ACPI_BUS_TYPE_PROCESSOR = 2, + ACPI_BUS_TYPE_THERMAL = 3, + ACPI_BUS_TYPE_POWER_BUTTON = 4, + ACPI_BUS_TYPE_SLEEP_BUTTON = 5, + ACPI_BUS_TYPE_ECDT_EC = 6, + ACPI_BUS_DEVICE_TYPE_COUNT = 7, +}; + +struct acpi_dep_data { + struct list_head node; + acpi_handle supplier; + acpi_handle consumer; + bool honor_dep; + bool met; + bool free_when_met; +}; + +struct acpi_scan_clear_dep_work { + struct work_struct work; + struct acpi_device *adev; +}; + +typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **); + +struct acpi_pnp_device_id { + u32 length; + char *string; +}; + +struct acpi_pnp_device_id_list { + u32 count; + u32 list_size; + struct acpi_pnp_device_id ids[0]; +}; + +struct acpi_device_info { + u32 info_size; + u32 name; + acpi_object_type type; + u8 param_count; + u16 valid; + u8 flags; + u8 highest_dstates[4]; + u8 lowest_dstates[5]; + u64 address; + struct acpi_pnp_device_id hardware_id; + struct acpi_pnp_device_id unique_id; + struct acpi_pnp_device_id class_code; + struct acpi_pnp_device_id_list compatible_id_list; +}; + +struct acpi_resource_irq { + u8 descriptor_length; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + union { + u8 interrupt; + struct { + struct { + } __Empty_interrupts; + u8 interrupts[0]; + }; + }; +}; + +struct acpi_resource_dma { + u8 type; + u8 bus_master; + u8 transfer; + u8 channel_count; + union { + u8 channel; + struct { + struct { + } __Empty_channels; + u8 channels[0]; + }; + }; +}; + +struct acpi_resource_start_dependent { + u8 descriptor_length; + u8 compatibility_priority; + u8 performance_robustness; +}; + +struct acpi_resource_io { + u8 io_decode; + u8 alignment; + u8 address_length; + u16 minimum; + u16 maximum; +} __attribute__((packed)); + +struct acpi_resource_fixed_io { + u16 address; + u8 address_length; +} __attribute__((packed)); + +struct acpi_resource_fixed_dma { + u16 request_lines; + u16 channels; + u8 width; +} __attribute__((packed)); + +struct acpi_resource_vendor { + u16 byte_length; + u8 byte_data[0]; +}; + +struct acpi_resource_vendor_typed { + u16 byte_length; + u8 uuid_subtype; + u8 uuid[16]; + u8 byte_data[0]; +} __attribute__((packed)); + +struct acpi_resource_end_tag { + u8 checksum; +}; + +struct acpi_resource_memory24 { + u8 write_protect; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +} __attribute__((packed)); + +struct 
acpi_resource_memory32 { + u8 write_protect; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +} __attribute__((packed)); + +struct acpi_resource_fixed_memory32 { + u8 write_protect; + u32 address; + u32 address_length; +} __attribute__((packed)); + +struct acpi_memory_attribute { + u8 write_protect; + u8 caching; + u8 range_type; + u8 translation; +}; + +struct acpi_io_attribute { + u8 range_type; + u8 translation; + u8 translation_type; + u8 reserved1; +}; + +union acpi_resource_attribute { + struct acpi_memory_attribute mem; + struct acpi_io_attribute io; + u8 type_specific; +}; + +struct acpi_address16_attribute { + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +}; + +struct acpi_resource_source { + u8 index; + u16 string_length; + char *string_ptr; +} __attribute__((packed)); + +struct acpi_resource_address16 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address16_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_address32_attribute { + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +}; + +struct acpi_resource_address32 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address32_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_address64_attribute { + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +}; + +struct acpi_resource_address64 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address64_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_extended_address64 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + u8 revision_ID; + struct acpi_address64_attribute address; + u64 type_specific; +} __attribute__((packed)); + +struct acpi_resource_extended_irq { + u8 producer_consumer; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + struct acpi_resource_source resource_source; + union { + u32 interrupt; + struct { + struct { + } __Empty_interrupts; + u32 interrupts[0]; + }; + }; +} __attribute__((packed)); + +struct acpi_resource_generic_register { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct acpi_resource_gpio { + u8 revision_id; + u8 connection_type; + u8 producer_consumer; + u8 pin_config; + u8 shareable; + u8 wake_capable; + u8 io_restriction; + u8 triggering; + u8 polarity; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_i2c_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 access_mode; + u16 slave_address; + u32 connection_speed; +} 
__attribute__((packed)); + +struct acpi_resource_spi_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 wire_mode; + u8 device_polarity; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; + u32 connection_speed; +} __attribute__((packed)); + +struct acpi_resource_uart_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 endian; + u8 data_bits; + u8 stop_bits; + u8 flow_control; + u8 parity; + u8 lines_enabled; + u16 rx_fifo_size; + u16 tx_fifo_size; + u32 default_baud_rate; +} __attribute__((packed)); + +struct acpi_resource_csi2_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 local_port_instance; + u8 phy_type; +} __attribute__((packed)); + +struct acpi_resource_common_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_function { + u8 revision_id; + u8 pin_config; + u8 shareable; + u16 function_number; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_config { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_label { + u16 string_length; + char *string_ptr; +} __attribute__((packed)); + +struct acpi_resource_pin_group { + u8 revision_id; + u8 producer_consumer; + u16 pin_table_length; + u16 vendor_length; + u16 *pin_table; + struct acpi_resource_label resource_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group_function { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u16 function_number; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group_config { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u8 pin_config_type; + u32 pin_config_value; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_clock_input { + u8 revision_id; + u8 mode; + u8 scale; + u16 frequency_divisor; + u32 frequency_numerator; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_address { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; +}; + +union acpi_resource_data { + struct acpi_resource_irq irq; + struct acpi_resource_dma 
dma; + struct acpi_resource_start_dependent start_dpf; + struct acpi_resource_io io; + struct acpi_resource_fixed_io fixed_io; + struct acpi_resource_fixed_dma fixed_dma; + struct acpi_resource_vendor vendor; + struct acpi_resource_vendor_typed vendor_typed; + struct acpi_resource_end_tag end_tag; + struct acpi_resource_memory24 memory24; + struct acpi_resource_memory32 memory32; + struct acpi_resource_fixed_memory32 fixed_memory32; + struct acpi_resource_address16 address16; + struct acpi_resource_address32 address32; + struct acpi_resource_address64 address64; + struct acpi_resource_extended_address64 ext_address64; + struct acpi_resource_extended_irq extended_irq; + struct acpi_resource_generic_register generic_reg; + struct acpi_resource_gpio gpio; + struct acpi_resource_i2c_serialbus i2c_serial_bus; + struct acpi_resource_spi_serialbus spi_serial_bus; + struct acpi_resource_uart_serialbus uart_serial_bus; + struct acpi_resource_csi2_serialbus csi2_serial_bus; + struct acpi_resource_common_serialbus common_serial_bus; + struct acpi_resource_pin_function pin_function; + struct acpi_resource_pin_config pin_config; + struct acpi_resource_pin_group pin_group; + struct acpi_resource_pin_group_function pin_group_function; + struct acpi_resource_pin_group_config pin_group_config; + struct acpi_resource_clock_input clock_input; + struct acpi_resource_address address; +}; + +struct acpi_resource { + u32 type; + u32 length; + union acpi_resource_data data; +}; + +typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *); + +struct acpi_table_stao { + struct acpi_table_header header; + u8 ignore_uart; +} __attribute__((packed)); + +struct acpi_table_spcr { + struct acpi_table_header header; + u8 interface_type; + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +} __attribute__((packed)); + +struct irq_override_cmp { + const struct dmi_system_id *system; + unsigned char irq; + unsigned char triggering; + unsigned char polarity; + unsigned char shareable; + bool override; +}; + +struct res_proc_context { + struct list_head *list; + int (*preproc)(struct acpi_resource *, void *); + void *preproc_data; + int count; + int error; +}; + +struct resource_win { + struct resource res; + resource_size_t offset; +}; + +struct acpi_lpi_state { + u32 min_residency; + u32 wake_latency; + u32 flags; + u32 arch_flags; + u32 res_cnt_freq; + u32 enable_parent_state; + u64 address; + u8 index; + u8 entry_method; + char desc[32]; +}; + +struct acpi_processor_power { + int count; + union { + struct acpi_processor_cx states[8]; + struct acpi_lpi_state lpi_states[8]; + }; + int timer_broadcast_on_state; +}; + +struct acpi_pct_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __attribute__((packed)); + +struct acpi_tsd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +}; + +struct acpi_processor_tx { + u16 power; + u16 performance; +}; + +struct acpi_processor_tx_tss; + +struct acpi_processor; + +struct acpi_processor_throttling { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; 
+ unsigned int state_count; + struct acpi_processor_tx_tss *states_tss; + struct acpi_tsd_package domain_info; + cpumask_var_t shared_cpu_map; + int (*acpi_processor_get_throttling)(struct acpi_processor *); + int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool); + u32 address; + u8 duty_offset; + u8 duty_width; + u8 tsd_valid_flag; + unsigned int shared_type; + struct acpi_processor_tx states[16]; +}; + +struct acpi_processor_lx { + int px; + int tx; +}; + +struct acpi_processor_limit { + struct acpi_processor_lx state; + struct acpi_processor_lx thermal; + struct acpi_processor_lx user; +}; + +struct acpi_processor_performance; + +struct acpi_processor { + acpi_handle handle; + u32 acpi_id; + phys_cpuid_t phys_id; + u32 id; + u32 pblk; + int performance_platform_limit; + int throttling_platform_limit; + struct acpi_processor_flags flags; + struct acpi_processor_power power; + struct acpi_processor_performance *performance; + struct acpi_processor_throttling throttling; + struct acpi_processor_limit limit; + struct thermal_cooling_device *cdev; + struct device *dev; + struct freq_qos_request perflib_req; + struct freq_qos_request thermal_req; +}; + +struct acpi_psd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +}; + +struct acpi_processor_px; + +struct acpi_processor_performance { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + unsigned int state_count; + struct acpi_processor_px *states; + struct acpi_psd_package domain_info; + cpumask_var_t shared_cpu_map; + unsigned int shared_type; +}; + +struct acpi_processor_px { + u64 core_frequency; + u64 power; + u64 transition_latency; + u64 bus_master_latency; + u64 control; + u64 status; +}; + +struct acpi_processor_tx_tss { + u64 freqpercentage; + u64 power; + u64 transition_latency; + u64 control; + u64 status; +}; + +struct acpi_processor_errata { + u8 smp; + struct { + u8 throttle : 1; + u8 fdma : 1; + u8 reserved : 6; + u32 bmisx; + } piix4; +}; + +enum acpi_ec_event_state { + EC_EVENT_READY = 0, + EC_EVENT_IN_PROGRESS = 1, + EC_EVENT_COMPLETE = 2, +}; + +struct transaction; + +struct acpi_ec { + acpi_handle handle; + acpi_handle address_space_handler_holder; + int gpe; + int irq; + unsigned long command_addr; + unsigned long data_addr; + bool global_lock; + unsigned long flags; + unsigned long reference_count; + struct mutex mutex; + wait_queue_head_t wait; + struct list_head list; + struct transaction *curr; + spinlock_t lock; + struct work_struct work; + unsigned long timestamp; + enum acpi_ec_event_state event_state; + unsigned int events_to_process; + unsigned int events_in_progress; + unsigned int queries_in_progress; + bool busy_polling; + unsigned int polling_guard; +}; + +struct transaction { + const u8 *wdata; + u8 *rdata; + unsigned short irq_count; + u8 command; + u8 wi; + u8 ri; + u8 wlen; + u8 rlen; + u8 flags; +}; + +enum ec_command { + ACPI_EC_COMMAND_READ = 128, + ACPI_EC_COMMAND_WRITE = 129, + ACPI_EC_BURST_ENABLE = 130, + ACPI_EC_BURST_DISABLE = 131, + ACPI_EC_COMMAND_QUERY = 132, +}; + +enum { + EC_FLAGS_QUERY_ENABLED = 0, + EC_FLAGS_EVENT_HANDLER_INSTALLED = 1, + EC_FLAGS_EC_HANDLER_INSTALLED = 2, + EC_FLAGS_EC_REG_CALLED = 3, + EC_FLAGS_QUERY_METHODS_INSTALLED = 4, + EC_FLAGS_STARTED = 5, + EC_FLAGS_STOPPED = 6, + EC_FLAGS_EVENTS_MASKED = 7, +}; + +typedef int (*acpi_ec_query_func)(void *); + +struct acpi_ec_query_handler { + struct list_head node; + 
acpi_ec_query_func func; + acpi_handle handle; + void *data; + u8 query_bit; + struct kref kref; +}; + +struct acpi_ec_query { + struct transaction transaction; + struct work_struct work; + struct acpi_ec_query_handler *handler; + struct acpi_ec *ec; +}; + +struct acpi_table_ecdt { + struct acpi_table_header header; + struct acpi_generic_address control; + struct acpi_generic_address data; + u32 uid; + u8 gpe; + u8 id[0]; +} __attribute__((packed)); + +enum dock_callback_type { + DOCK_CALL_HANDLER = 0, + DOCK_CALL_FIXUP = 1, + DOCK_CALL_UEVENT = 2, +}; + +struct dock_station { + acpi_handle handle; + unsigned long last_dock_time; + u32 flags; + struct list_head dependent_devices; + struct list_head sibling; + struct platform_device *dock_device; +}; + +struct dock_dependent_device { + struct list_head list; + struct acpi_device *adev; +}; + +struct pci_osc_bit_struct { + u32 bit; + char *desc; +}; + +enum acpi_bridge_type { + ACPI_BRIDGE_TYPE_PCIE = 1, + ACPI_BRIDGE_TYPE_CXL = 2, +}; + +struct acpi_pci_root_ops; + +struct acpi_pci_root_info { + struct acpi_pci_root *root; + struct acpi_device *bridge; + struct acpi_pci_root_ops *ops; + struct list_head resources; + char name[16]; +}; + +struct acpi_pci_root_ops { + struct pci_ops *pci_ops; + int (*init_info)(struct acpi_pci_root_info *); + void (*release_info)(struct acpi_pci_root_info *); + int (*prepare_resources)(struct acpi_pci_root_info *); +}; + +struct acpi_pci_link_irq { + u32 active; + u8 triggering; + u8 polarity; + u8 resource_type; + u8 possible_count; + u32 possible[16]; + u8 initialized : 1; + u8 reserved : 7; +}; + +struct acpi_pci_link { + struct list_head list; + struct acpi_device *device; + struct acpi_pci_link_irq irq; + int refcnt; +}; + +struct prt_quirk { + const struct dmi_system_id *system; + unsigned int segment; + unsigned int bus; + unsigned int device; + unsigned char pin; + const char *source; + const char *actual_source; +}; + +struct acpi_pci_routing_table { + u32 length; + u32 pin; + u64 address; + u32 source_index; + union { + char pad[4]; + struct { + struct { + } __Empty_source; + char source[0]; + }; + }; +}; + +struct acpi_prt_entry { + struct acpi_pci_id id; + u8 pin; + acpi_handle link; + u32 index; +}; + +struct apd_private_data; + +struct apd_device_desc { + unsigned int fixed_clk_rate; + struct property_entry *properties; + int (*setup)(struct apd_private_data *); +}; + +struct apd_private_data { + struct clk *clk; + struct acpi_device *adev; + const struct apd_device_desc *dev_desc; +}; + +struct acpi_power_resource; + +struct acpi_power_resource_entry { + struct list_head node; + struct acpi_power_resource *resource; +}; + +struct acpi_power_resource { + struct acpi_device device; + struct list_head list_node; + u32 system_level; + u32 order; + unsigned int ref_count; + u8 state; + struct mutex resource_lock; + struct list_head dependents; +}; + +struct acpi_power_dependent_device { + struct device *dev; + struct list_head node; +}; + +enum { + ACPI_GENL_CMD_UNSPEC = 0, + ACPI_GENL_CMD_EVENT = 1, + __ACPI_GENL_CMD_MAX = 2, +}; + +enum { + ACPI_GENL_ATTR_UNSPEC = 0, + ACPI_GENL_ATTR_EVENT = 1, + __ACPI_GENL_ATTR_MAX = 2, +}; + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + +struct acpi_genl_event { + acpi_device_class device_class; + char bus_id[15]; + u32 type; + u32 data; +}; + +struct acpi_ged_event { + struct list_head node; + struct device *dev; + unsigned int gsi; + unsigned int irq; + acpi_handle 
handle; +}; + +struct acpi_ged_device { + struct device *dev; + struct list_head event_list; +}; + +struct event_counter { + u32 count; + u32 flags; +}; + +struct acpi_data_attr; + +struct acpi_data_obj { + char *name; + int (*fn)(void *, struct acpi_data_attr *); +}; + +struct acpi_data_attr { + struct bin_attribute attr; + u64 addr; +}; + +struct acpi_table_attr { + struct bin_attribute attr; + char name[4]; + int instance; + char filename[8]; + struct list_head node; +}; + +typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *); + +struct acpi_table_bert { + struct acpi_table_header header; + u32 region_length; + u64 address; +}; + +struct acpi_table_ccel { + struct acpi_table_header header; + u8 CCtype; + u8 Ccsub_type; + u16 reserved; + u64 log_area_minimum_length; + u64 log_area_start_address; +}; + +struct acpi_device_properties { + const guid_t *guid; + union acpi_object *properties; + struct list_head list; + void **bufs; +}; + +struct override_status_id { + struct acpi_device_id hid[2]; + struct x86_cpu_id cpu_ids[2]; + struct dmi_system_id dmi_ids[2]; + const char *uid; + const char *path; + unsigned long long status; +}; + +struct lpi_constraints { + acpi_handle handle; + int min_dstate; +}; + +struct amd_lps0_hid_device_data { + const bool check_off_by_one; +}; + +struct acpi_s2idle_dev_ops { + struct list_head list_node; + void (*prepare)(); + void (*check)(); + void (*restore)(); +}; + +struct lpi_device_constraint_amd { + char *name; + int enabled; + int function_states; + int min_dstate; +}; + +struct lpi_device_info { + char *name; + int enabled; + union acpi_object *package; +}; + +struct lpi_device_constraint { + int uid; + int min_dstate; + int function_states; +}; + +struct acpi_lpat { + int temp; + int raw; +}; + +struct acpi_lpat_conversion_table { + struct acpi_lpat *lpat; + int lpat_count; +}; + +struct fpdt_record_header { + u16 type; + u8 length; + u8 revision; +}; + +struct resume_performance_record { + struct fpdt_record_header header; + u32 resume_count; + u64 resume_prev; + u64 resume_avg; +}; + +struct suspend_performance_record { + struct fpdt_record_header header; + u64 suspend_start; + u64 suspend_end; +} __attribute__((packed)); + +struct boot_performance_record { + struct fpdt_record_header header; + u32 reserved; + u64 firmware_start; + u64 bootloader_load; + u64 bootloader_launch; + u64 exitbootservice_start; + u64 exitbootservice_end; +}; + +enum fpdt_subtable_type { + SUBTABLE_FBPT = 0, + SUBTABLE_S3PT = 1, +}; + +enum fpdt_record_type { + RECORD_S3_RESUME = 0, + RECORD_S3_SUSPEND = 1, + RECORD_BOOT = 2, +}; + +struct fpdt_subtable_header { + u32 signature; + u32 length; +}; + +struct fpdt_subtable_entry { + u16 type; + u8 length; + u8 revision; + u32 reserved; + u64 address; +}; + +struct lpit_residency_info { + struct acpi_generic_address gaddr; + u64 frequency; + void *iomem_addr; +}; + +struct acpi_lpit_header { + u32 type; + u32 length; + u16 unique_id; + u16 reserved; + u32 flags; +}; + +struct acpi_lpit_native { + struct acpi_lpit_header header; + struct acpi_generic_address entry_trigger; + u32 residency; + u32 latency; + struct acpi_generic_address residency_counter; + u64 counter_frequency; +}; + +struct acpi_table_lpit { + struct acpi_table_header header; +}; + +struct acpi_prmt_module_info { + u16 revision; + u16 length; + u8 module_guid[16]; + u16 major_rev; + u16 minor_rev; + u16 handler_info_count; + u32 handler_info_offset; + u64 mmio_list_pointer; +} __attribute__((packed)); + +struct acpi_prmt_handler_info { + u16 
revision; + u16 length; + u8 handler_guid[16]; + u64 handler_address; + u64 static_data_buffer_address; + u64 acpi_param_buffer_address; +} __attribute__((packed)); + +struct prm_buffer { + u8 prm_status; + u64 efi_status; + u8 prm_cmd; + guid_t handler_guid; +} __attribute__((packed)); + +struct prm_handler_info { + guid_t guid; + efi_status_t (*handler_addr)(u64, void *); + u64 static_data_buffer_addr; + u64 acpi_param_buffer_addr; + struct list_head handler_list; +}; + +struct prm_mmio_info; + +struct prm_module_info { + guid_t guid; + u16 major_rev; + u16 minor_rev; + u16 handler_count; + struct prm_mmio_info *mmio_info; + bool updatable; + struct list_head module_list; + struct prm_handler_info handlers[0]; +}; + +struct prm_mmio_addr_range { + u64 phys_addr; + u64 virt_addr; + u32 length; +} __attribute__((packed)); + +struct prm_mmio_info { + u64 mmio_count; + struct prm_mmio_addr_range addr_ranges[0]; +}; + +struct prm_context_buffer { + char signature[4]; + u16 revision; + u16 reserved; + guid_t identifier; + u64 static_data_buffer; + struct prm_mmio_info *mmio_ranges; +}; + +typedef u64 acpi_integer; + +struct acpi_pcc_info { + u8 subspace_id; + u16 length; + u8 *internal_buffer; +}; + +struct mbox_client { + struct device *dev; + bool tx_block; + unsigned long tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct pcc_mbox_chan; + +struct pcc_data { + struct pcc_mbox_chan *pcc_chan; + void *pcc_comm_addr; + struct completion done; + struct mbox_client cl; + struct acpi_pcc_info ctx; +}; + +struct mbox_chan; + +struct pcc_mbox_chan { + struct mbox_chan *mchan; + u64 shmem_base_addr; + u64 shmem_size; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +}; + +struct mbox_controller; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_chan_ops; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan *(*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; + struct list_head node; +}; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, unsigned long); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +union acpi_predefined_info; + +struct acpi_evaluate_info { + struct acpi_namespace_node *prefix_node; + const char *relative_pathname; + union acpi_operand_object **parameters; + struct acpi_namespace_node *node; + union acpi_operand_object *obj_desc; + char *full_pathname; + const union acpi_predefined_info *predefined; + union acpi_operand_object *return_object; + union acpi_operand_object *parent_package; + u32 return_flags; + u32 return_btype; + u16 param_count; + u16 node_flags; + u8 pass_number; + u8 return_object_type; + u8 flags; +}; + +struct acpi_name_info { + char name[4]; + u16 argument_list; + u8 expected_btypes; +} __attribute__((packed)); + +struct acpi_package_info { + u8 type; 
+ u8 object_type1; + u8 count1; + u8 object_type2; + u8 count2; + u16 reserved; +} __attribute__((packed)); + +struct acpi_package_info2 { + u8 type; + u8 count; + u8 object_type[4]; + u8 reserved; +}; + +struct acpi_package_info3 { + u8 type; + u8 count; + u8 object_type[2]; + u8 tail_object_type; + u16 reserved; +} __attribute__((packed)); + +struct acpi_package_info4 { + u8 type; + u8 object_type1; + u8 count1; + u8 sub_object_types; + u8 pkg_count; + u16 reserved; +} __attribute__((packed)); + +union acpi_predefined_info { + struct acpi_name_info info; + struct acpi_package_info ret_info; + struct acpi_package_info2 ret_info2; + struct acpi_package_info3 ret_info3; + struct acpi_package_info4 ret_info4; +}; + +enum { + ACPI_REFCLASS_LOCAL = 0, + ACPI_REFCLASS_ARG = 1, + ACPI_REFCLASS_REFOF = 2, + ACPI_REFCLASS_INDEX = 3, + ACPI_REFCLASS_TABLE = 4, + ACPI_REFCLASS_NAME = 5, + ACPI_REFCLASS_DEBUG = 6, + ACPI_REFCLASS_MAX = 6, +}; + +struct acpi_common_descriptor { + void *common_pointer; + u8 descriptor_type; +}; + +union acpi_descriptor { + struct acpi_common_descriptor common; + union acpi_operand_object object; + struct acpi_namespace_node node; + union acpi_parse_object op; +}; + +typedef enum { + ACPI_IMODE_LOAD_PASS1 = 1, + ACPI_IMODE_LOAD_PASS2 = 2, + ACPI_IMODE_EXECUTE = 3, +} acpi_interpreter_mode; + +struct acpi_create_field_info { + struct acpi_namespace_node *region_node; + struct acpi_namespace_node *field_node; + struct acpi_namespace_node *register_node; + struct acpi_namespace_node *data_register_node; + struct acpi_namespace_node *connection_node; + u8 *resource_buffer; + u32 bank_value; + u32 field_bit_position; + u32 field_bit_length; + u16 resource_length; + u16 pin_number_index; + u8 field_flags; + u8 attribute; + u8 field_type; + u8 access_length; +}; + +struct acpi_init_walk_info { + u32 table_index; + u32 object_count; + u32 method_count; + u32 serial_method_count; + u32 non_serial_method_count; + u32 serialized_method_count; + u32 device_count; + u32 op_region_count; + u32 field_count; + u32 buffer_count; + u32 package_count; + u32 op_region_init; + u32 field_init; + u32 buffer_init; + u32 package_init; + acpi_owner_id owner_id; +}; + +typedef u32 acpi_name; + +enum { + AML_FIELD_ACCESS_ANY = 0, + AML_FIELD_ACCESS_BYTE = 1, + AML_FIELD_ACCESS_WORD = 2, + AML_FIELD_ACCESS_DWORD = 3, + AML_FIELD_ACCESS_QWORD = 4, + AML_FIELD_ACCESS_BUFFER = 5, +}; + +typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *); + +typedef u32 acpi_mutex_handle; + +struct acpi_gpe_walk_info { + struct acpi_namespace_node *gpe_device; + struct acpi_gpe_block_info *gpe_block; + u16 count; + acpi_owner_id owner_id; + u8 execute_by_owner_id; +}; + +struct acpi_gpe_device_info { + u32 index; + u32 next_block_base_index; + acpi_status status; + struct acpi_namespace_node *gpe_device; +}; + +typedef u32 (*acpi_event_handler)(void *); + +struct acpi_reg_walk_info { + u32 function; + u32 reg_run_count; + acpi_adr_space_type space_id; +}; + +struct acpi_connection_info { + u8 *connection; + u16 length; + u8 access_length; +}; + +struct acpi_ffh_info { + u64 offset; + u64 length; +}; + +struct acpi_mem_mapping; + +struct acpi_mem_space_context { + u32 length; + acpi_physical_address address; + struct acpi_mem_mapping *cur_mm; + struct acpi_mem_mapping *first_mm; +}; + +struct acpi_mem_mapping { + acpi_physical_address physical_address; + u8 *logical_address; + acpi_size length; + struct acpi_mem_mapping *next_mm; +}; + +struct acpi_data_table_mapping { + void *pointer; +}; + +typedef u32 
(*acpi_sci_handler)(void *); + +struct acpi_sci_handler_info { + struct acpi_sci_handler_info *next; + acpi_sci_handler address; + void *context; +}; + +enum { + AML_FIELD_UPDATE_PRESERVE = 0, + AML_FIELD_UPDATE_WRITE_AS_ONES = 32, + AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64, +}; + +struct acpi_signal_fatal_info { + u32 type; + u32 code; + u32 argument; +}; + +enum { + MATCH_MTR = 0, + MATCH_MEQ = 1, + MATCH_MLE = 2, + MATCH_MLT = 3, + MATCH_MGE = 4, + MATCH_MGT = 5, +}; + +enum { + AML_FIELD_ATTRIB_QUICK = 2, + AML_FIELD_ATTRIB_SEND_RECEIVE = 4, + AML_FIELD_ATTRIB_BYTE = 6, + AML_FIELD_ATTRIB_WORD = 8, + AML_FIELD_ATTRIB_BLOCK = 10, + AML_FIELD_ATTRIB_BYTES = 11, + AML_FIELD_ATTRIB_PROCESS_CALL = 12, + AML_FIELD_ATTRIB_BLOCK_PROCESS_CALL = 13, + AML_FIELD_ATTRIB_RAW_BYTES = 14, + AML_FIELD_ATTRIB_RAW_PROCESS_BYTES = 15, +}; + +typedef enum { + ACPI_TRACE_AML_METHOD = 0, + ACPI_TRACE_AML_OPCODE = 1, + ACPI_TRACE_AML_REGION = 2, +} acpi_trace_event_type; + +struct acpi_gpe_block_status_context { + struct acpi_gpe_register_info *gpe_skip_register_info; + u8 gpe_skip_mask; + u8 retval; +}; + +struct acpi_bit_register_info { + u8 parent_register; + u8 bit_position; + u16 access_bit_mask; +}; + +struct acpi_port_info { + char *name; + u16 start; + u16 end; + u8 osi_dependency; +}; + +struct acpi_pci_device { + acpi_handle device; + struct acpi_pci_device *next; +}; + +struct acpi_device_walk_info { + struct acpi_table_desc *table_desc; + struct acpi_evaluate_info *evaluate_info; + u32 device_count; + u32 num_STA; + u32 num_INI; +}; + +typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, + union acpi_generic_state *, void *); + +enum acpi_return_package_types { + ACPI_PTYPE1_FIXED = 1, + ACPI_PTYPE1_VAR = 2, + ACPI_PTYPE1_OPTION = 3, + ACPI_PTYPE2 = 4, + ACPI_PTYPE2_COUNT = 5, + ACPI_PTYPE2_PKG_COUNT = 6, + ACPI_PTYPE2_FIXED = 7, + ACPI_PTYPE2_MIN = 8, + ACPI_PTYPE2_REV_FIXED = 9, + ACPI_PTYPE2_FIX_VAR = 10, + ACPI_PTYPE2_VAR_VAR = 11, + ACPI_PTYPE2_UUID_PAIR = 12, + ACPI_PTYPE_CUSTOM = 13, +}; + +typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, + union acpi_operand_object *, + union acpi_operand_object **); + +struct acpi_simple_repair_info { + char name[4]; + u32 unexpected_btypes; + u32 package_index; + acpi_object_converter object_converter; +}; + +typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, + union acpi_operand_object **); + +struct acpi_repair_info { + char name[4]; + acpi_repair_function repair_function; +}; + +struct acpi_namestring_info { + const char *external_name; + const char *next_external_char; + char *internal_name; + u32 length; + u32 num_segments; + u32 num_carats; + u8 fully_qualified; +}; + +struct acpi_rw_lock { + void *writer_mutex; + void *reader_mutex; + u32 num_readers; +}; + +struct acpi_get_devices_info { + acpi_walk_callback user_function; + void *context; + const char *hid; +}; + +struct acpi_rsconvert_info { + u8 opcode; + u8 resource_offset; + u8 aml_offset; + u8 value; +}; + +struct aml_resource_small_header { + u8 descriptor_type; +}; + +struct aml_resource_large_header { + u8 descriptor_type; + u16 resource_length; +} __attribute__((packed)); + +struct aml_resource_irq { + u8 descriptor_type; + u16 irq_mask; + u8 flags; +} __attribute__((packed)); + +struct aml_resource_dma { + u8 descriptor_type; + u8 dma_channel_mask; + u8 flags; +}; + +struct aml_resource_start_dependent { + u8 descriptor_type; + u8 flags; +}; + +struct aml_resource_end_dependent { + u8 descriptor_type; +}; + +struct 
aml_resource_io { + u8 descriptor_type; + u8 flags; + u16 minimum; + u16 maximum; + u8 alignment; + u8 address_length; +}; + +struct aml_resource_fixed_io { + u8 descriptor_type; + u16 address; + u8 address_length; +} __attribute__((packed)); + +struct aml_resource_fixed_dma { + u8 descriptor_type; + u16 request_lines; + u16 channels; + u8 width; +} __attribute__((packed)); + +struct aml_resource_vendor_small { + u8 descriptor_type; +}; + +struct aml_resource_end_tag { + u8 descriptor_type; + u8 checksum; +}; + +struct aml_resource_memory24 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +} __attribute__((packed)); + +struct aml_resource_generic_register { + u8 descriptor_type; + u16 resource_length; + u8 address_space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct aml_resource_vendor_large { + u8 descriptor_type; + u16 resource_length; +} __attribute__((packed)); + +struct aml_resource_memory32 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_fixed_memory32 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u32 address; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_address16 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +} __attribute__((packed)); + +struct aml_resource_address32 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_address64 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +} __attribute__((packed)); + +struct aml_resource_extended_address64 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u8 revision_ID; + u8 reserved; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; + u64 type_specific; +} __attribute__((packed)); + +struct aml_resource_extended_irq { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u8 interrupt_count; + union { + u32 interrupt; + struct { + struct { + } __Empty_interrupts; + u32 interrupts[0]; + }; + }; +} __attribute__((packed)); + +struct aml_resource_gpio { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 connection_type; + u16 flags; + u16 int_flags; + u8 pin_config; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_i2c_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 connection_speed; + u16 slave_address; +} __attribute__((packed)); + +struct aml_resource_spi_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; 
+ u16 type_data_length; + u32 connection_speed; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; +} __attribute__((packed)); + +struct aml_resource_uart_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 default_baud_rate; + u16 rx_fifo_size; + u16 tx_fifo_size; + u8 parity; + u8 lines_enabled; +} __attribute__((packed)); + +struct aml_resource_csi2_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; +} __attribute__((packed)); + +struct aml_resource_common_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; +} __attribute__((packed)); + +struct aml_resource_pin_function { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config; + u16 function_number; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_config { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 pin_table_offset; + u16 label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group_function { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 function_number; + u8 res_source_index; + u16 res_source_offset; + u16 res_source_label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group_config { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config_type; + u32 pin_config_value; + u8 res_source_index; + u16 res_source_offset; + u16 res_source_label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_clock_input { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 frequency_divisor; + u32 frequency_numerator; +} __attribute__((packed)); + +struct aml_resource_address { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; +} __attribute__((packed)); + +union aml_resource { + u8 descriptor_type; + struct aml_resource_small_header small_header; + struct aml_resource_large_header large_header; + struct aml_resource_irq irq; + struct aml_resource_dma dma; + struct aml_resource_start_dependent start_dpf; + struct aml_resource_end_dependent end_dpf; + struct aml_resource_io io; + struct aml_resource_fixed_io fixed_io; + struct aml_resource_fixed_dma fixed_dma; + struct aml_resource_vendor_small vendor_small; + struct aml_resource_end_tag end_tag; + struct aml_resource_memory24 memory24; + struct aml_resource_generic_register generic_reg; + struct aml_resource_vendor_large vendor_large; + struct aml_resource_memory32 memory32; + struct aml_resource_fixed_memory32 fixed_memory32; + struct 
aml_resource_address16 address16; + struct aml_resource_address32 address32; + struct aml_resource_address64 address64; + struct aml_resource_extended_address64 ext_address64; + struct aml_resource_extended_irq extended_irq; + struct aml_resource_gpio gpio; + struct aml_resource_i2c_serialbus i2c_serial_bus; + struct aml_resource_spi_serialbus spi_serial_bus; + struct aml_resource_uart_serialbus uart_serial_bus; + struct aml_resource_csi2_serialbus csi2_serial_bus; + struct aml_resource_common_serialbus common_serial_bus; + struct aml_resource_pin_function pin_function; + struct aml_resource_pin_config pin_config; + struct aml_resource_pin_group pin_group; + struct aml_resource_pin_group_function pin_group_function; + struct aml_resource_pin_group_config pin_group_config; + struct aml_resource_clock_input clock_input; + struct aml_resource_address address; + u32 dword_item; + u16 word_item; + u8 byte_item; +}; + +typedef u16 acpi_rs_length; + +typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **); + +enum { + ACPI_RSC_INITGET = 0, + ACPI_RSC_INITSET = 1, + ACPI_RSC_FLAGINIT = 2, + ACPI_RSC_1BITFLAG = 3, + ACPI_RSC_2BITFLAG = 4, + ACPI_RSC_3BITFLAG = 5, + ACPI_RSC_6BITFLAG = 6, + ACPI_RSC_ADDRESS = 7, + ACPI_RSC_BITMASK = 8, + ACPI_RSC_BITMASK16 = 9, + ACPI_RSC_COUNT = 10, + ACPI_RSC_COUNT16 = 11, + ACPI_RSC_COUNT_GPIO_PIN = 12, + ACPI_RSC_COUNT_GPIO_RES = 13, + ACPI_RSC_COUNT_GPIO_VEN = 14, + ACPI_RSC_COUNT_SERIAL_RES = 15, + ACPI_RSC_COUNT_SERIAL_VEN = 16, + ACPI_RSC_DATA8 = 17, + ACPI_RSC_EXIT_EQ = 18, + ACPI_RSC_EXIT_LE = 19, + ACPI_RSC_EXIT_NE = 20, + ACPI_RSC_LENGTH = 21, + ACPI_RSC_MOVE_GPIO_PIN = 22, + ACPI_RSC_MOVE_GPIO_RES = 23, + ACPI_RSC_MOVE_SERIAL_RES = 24, + ACPI_RSC_MOVE_SERIAL_VEN = 25, + ACPI_RSC_MOVE8 = 26, + ACPI_RSC_MOVE16 = 27, + ACPI_RSC_MOVE32 = 28, + ACPI_RSC_MOVE64 = 29, + ACPI_RSC_SET8 = 30, + ACPI_RSC_SOURCE = 31, + ACPI_RSC_SOURCEX = 32, +}; + +typedef u32 acpi_rsdesc_size; + +struct acpi_vendor_uuid; + +struct acpi_vendor_walk_info { + struct acpi_vendor_uuid *uuid; + struct acpi_buffer *buffer; + acpi_status status; +}; + +struct acpi_vendor_uuid { + u8 subtype; + u8 data[16]; +}; + +struct acpi_fadt_info { + const char *name; + u16 address64; + u16 address32; + u16 length; + u8 default_length; + u8 flags; +}; + +struct acpi_fadt_pm_info { + struct acpi_generic_address *target; + u16 source; + u8 register_num; +}; + +struct acpi_table_rsdp { + char signature[8]; + u8 checksum; + char oem_id[6]; + u8 revision; + u32 rsdt_physical_address; + u32 length; + u64 xsdt_physical_address; + u8 extended_checksum; + u8 reserved[3]; +} __attribute__((packed)); + +struct acpi_address_range { + struct acpi_address_range *next; + struct acpi_namespace_node *region_node; + acpi_physical_address start_address; + acpi_physical_address end_address; +}; + +struct acpi_table_cdat { + u32 length; + u8 revision; + u8 checksum; + u8 reserved[6]; + u32 sequence; +}; + +struct acpi_pkg_info { + u8 *free_space; + acpi_size length; + u32 object_space; + u32 num_packages; +}; + +struct acpi_exception_info { + char *name; +}; + +struct acpi_comment_node { + char *comment; + struct acpi_comment_node *next; +}; + +struct acpi_fixed_event_info { + u8 status_register_id; + u8 enable_register_id; + u16 status_bit_mask; + u16 enable_bit_mask; +}; + +struct acpi_table_fadt { + struct acpi_table_header header; + u32 facs; + u32 dsdt; + u8 model; + u8 preferred_profile; + u16 sci_interrupt; + u32 smi_command; + u8 acpi_enable; + u8 acpi_disable; + u8 s4_bios_request; + u8 
pstate_control; + u32 pm1a_event_block; + u32 pm1b_event_block; + u32 pm1a_control_block; + u32 pm1b_control_block; + u32 pm2_control_block; + u32 pm_timer_block; + u32 gpe0_block; + u32 gpe1_block; + u8 pm1_event_length; + u8 pm1_control_length; + u8 pm2_control_length; + u8 pm_timer_length; + u8 gpe0_block_length; + u8 gpe1_block_length; + u8 gpe1_base; + u8 cst_control; + u16 c2_latency; + u16 c3_latency; + u16 flush_size; + u16 flush_stride; + u8 duty_offset; + u8 duty_width; + u8 day_alarm; + u8 month_alarm; + u8 century; + u16 boot_flags; + u8 reserved; + u32 flags; + struct acpi_generic_address reset_register; + u8 reset_value; + u16 arm_boot_flags; + u8 minor_revision; + u64 Xfacs; + u64 Xdsdt; + struct acpi_generic_address xpm1a_event_block; + struct acpi_generic_address xpm1b_event_block; + struct acpi_generic_address xpm1a_control_block; + struct acpi_generic_address xpm1b_control_block; + struct acpi_generic_address xpm2_control_block; + struct acpi_generic_address xpm_timer_block; + struct acpi_generic_address xgpe0_block; + struct acpi_generic_address xgpe1_block; + struct acpi_generic_address sleep_control; + struct acpi_generic_address sleep_status; + u64 hypervisor_id; +} __attribute__((packed)); + +struct acpi_table_list { + struct acpi_table_desc *tables; + u32 current_table_count; + u32 max_table_count; + u8 flags; +}; + +struct acpi_mutex_info { + void *mutex; + u32 use_count; + u64 thread_id; +}; + +typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *); + +typedef acpi_status (*acpi_init_handler)(acpi_handle, u32); + +struct acpi_ged_handler_info { + struct acpi_ged_handler_info *next; + u32 int_id; + struct acpi_namespace_node *evt_method; +}; + +struct acpi_interface_info { + char *name; + struct acpi_interface_info *next; + u8 flags; + u8 value; +}; + +struct acpi_fixed_event_handler { + acpi_event_handler handler; + void *context; +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, + 
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, + POWER_SUPPLY_PROP_ENERGY_FULL = 43, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, + POWER_SUPPLY_PROP_ENERGY_NOW = 45, + POWER_SUPPLY_PROP_ENERGY_AVG = 46, + POWER_SUPPLY_PROP_CAPACITY = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 48, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, + POWER_SUPPLY_PROP_TEMP = 52, + POWER_SUPPLY_PROP_TEMP_MAX = 53, + POWER_SUPPLY_PROP_TEMP_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, + POWER_SUPPLY_PROP_TYPE = 64, + POWER_SUPPLY_PROP_USB_TYPE = 65, + POWER_SUPPLY_PROP_SCOPE = 66, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, + POWER_SUPPLY_PROP_CALIBRATE = 69, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, + POWER_SUPPLY_PROP_MODEL_NAME = 73, + POWER_SUPPLY_PROP_MANUFACTURER = 74, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct power_supply; + +union power_supply_propval; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + const enum power_supply_usb_type *usb_types; + size_t num_usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, + union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, + const union power_supply_propval *); + int (*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct acpi_ac { + struct power_supply *charger; + struct power_supply_desc charger_desc; + struct acpi_device *device; + unsigned long long state; + struct notifier_block battery_nb; +}; + +struct power_supply_battery_info; + +struct thermal_zone_device; + +struct led_trigger; + +struct power_supply { + const struct 
power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct power_supply_battery_info *battery_info; + struct thermal_zone_device *tzd; + struct thermal_cooling_device *tcd; + struct led_trigger *charging_full_trig; + char *charging_full_trig_name; + struct led_trigger *charging_trig; + char *charging_trig_name; + struct led_trigger *full_trig; + char *full_trig_name; + struct led_trigger *online_trig; + char *online_trig_name; + struct led_trigger *charging_blink_full_solid_trig; + char *charging_blink_full_solid_trig_name; +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply_maintenance_charge_table; + +struct power_supply_battery_ocv_table; + +struct power_supply_resistance_temp_table; + +struct power_supply_vbat_ri_table; + +struct power_supply_battery_info { + unsigned int technology; + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; + int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; + struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; +}; + +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_vbat_ri_table { + int vbat_uv; + int ri_uohm; +}; + +struct led_classdev; + +struct led_hw_trigger_type; + +struct led_trigger { + const char *name; + int (*activate)(struct led_classdev *); + void (*deactivate)(struct led_classdev *); + struct led_hw_trigger_type *trigger_type; + spinlock_t leddev_list_lock; + struct list_head led_cdevs; + struct list_head next_trig; + const struct attribute_group **groups; +}; + +struct led_pattern; + +struct led_classdev { + const char *name; + unsigned int brightness; + unsigned int max_brightness; + unsigned int color; + int flags; + unsigned long work_flags; + void (*brightness_set)(struct led_classdev *, enum led_brightness); + int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness); + enum led_brightness 
(*brightness_get)(struct led_classdev *); + int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); + int (*pattern_set)(struct led_classdev *, struct led_pattern *, u32, int); + int (*pattern_clear)(struct led_classdev *); + struct device *dev; + const struct attribute_group **groups; + struct list_head node; + const char *default_trigger; + unsigned long blink_delay_on; + unsigned long blink_delay_off; + struct timer_list blink_timer; + int blink_brightness; + int new_blink_brightness; + void (*flash_resume)(struct led_classdev *); + struct work_struct set_brightness_work; + int delayed_set_value; + unsigned long delayed_delay_on; + unsigned long delayed_delay_off; + struct rw_semaphore trigger_lock; + struct led_trigger *trigger; + struct list_head trig_list; + void *trigger_data; + bool activated; + struct led_hw_trigger_type *trigger_type; + const char *hw_control_trigger; + int (*hw_control_is_supported)(struct led_classdev *, unsigned long); + int (*hw_control_set)(struct led_classdev *, unsigned long); + int (*hw_control_get)(struct led_classdev *, unsigned long *); + struct device *(*hw_control_get_device)(struct led_classdev *); + struct mutex led_access; +}; + +struct led_pattern { + u32 delta_t; + int brightness; +}; + +struct led_hw_trigger_type { + int dummy; +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; +}; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, +}; + +struct thermal_attr; + +struct thermal_trip; + +struct thermal_zone_device_ops; + +struct thermal_zone_params; + +struct thermal_governor; + +struct thermal_zone_device { + int id; + char type[20]; + struct device device; + struct attribute_group trips_attribute_group; + struct thermal_attr *trip_temp_attrs; + struct thermal_attr *trip_type_attrs; + struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; + void *devdata; + struct thermal_trip *trips; + int num_trips; + unsigned long passive_delay_jiffies; + unsigned long polling_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + atomic_t need_update; + struct thermal_zone_device_ops *ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct list_head thermal_instances; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; + bool suspended; +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; + void *priv; +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, +}; + +struct thermal_zone_device_ops { + int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *); + int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *); + int 
(*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*set_trip_temp)(struct thermal_zone_device *, int, int); + int (*set_trip_hyst)(struct thermal_zone_device *, int, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, const struct thermal_trip *, + enum thermal_trend *); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); +}; + +struct thermal_zone_params { + char governor_name[20]; + bool no_hwmon; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct thermal_governor { + char name[20]; + int (*bind_to_tz)(struct thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + int (*throttle)(struct thermal_zone_device *, const struct thermal_trip *); + struct list_head governor_list; +}; + +struct acpi_fan_fif { + u8 revision; + u8 fine_grain_ctrl; + u8 step_size; + u8 low_speed_notification; +}; + +struct acpi_fan_fps; + +struct acpi_fan { + bool acpi4; + struct acpi_fan_fif fif; + struct acpi_fan_fps *fps; + int fps_count; + struct thermal_cooling_device *cdev; + struct device_attribute fst_speed; + struct device_attribute fine_grain_control; +}; + +struct acpi_fan_fps { + u64 control; + u64 trip_point; + u64 speed; + u64 noise_level; + u64 power; + char name[20]; + struct device_attribute dev_attr; +}; + +struct acpi_fan_fst { + u64 revision; + u64 control; + u64 speed; +}; + +struct acpi_tad_rt { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 valid; + u16 msec; + s16 tz; + u8 daylight; + u8 padding[3]; +}; + +struct acpi_tad_driver_data { + u32 capabilities; +}; + +struct acpi_pci_slot { + struct pci_slot *pci_slot; + struct list_head list; +}; + +struct acpi_lpi_states_array { + unsigned int size; + unsigned int composite_states_size; + struct acpi_lpi_state *entries; + struct acpi_lpi_state *composite_states[8]; +}; + +struct throttling_tstate { + unsigned int cpu; + int target_state; +}; + +struct acpi_processor_throttling_arg { + struct acpi_processor *pr; + int target_state; + bool force; +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); +}; + +struct acpi_thermal_trip { + unsigned long temp_dk; + struct acpi_handle_list devices; +}; + +struct acpi_thermal_passive { + struct acpi_thermal_trip trip; + unsigned long tc1; + unsigned long tc2; + unsigned long tsp; +}; + +struct acpi_thermal_active { + struct acpi_thermal_trip trip; +}; + +struct acpi_thermal_trips { + struct acpi_thermal_passive passive; + struct acpi_thermal_active active[10]; +}; + +struct acpi_thermal { + struct acpi_device *device; + acpi_bus_id name; + unsigned long temp_dk; + unsigned long last_temp_dk; + unsigned long polling_frequency; + volatile u8 zombie; + struct acpi_thermal_trips trips; + struct thermal_trip *trip_table; + struct thermal_zone_device *thermal_zone; + int kelvin_offset; + struct work_struct thermal_check_work; + struct mutex thermal_check_lock; + refcount_t thermal_check_count; +}; + +struct adjust_trip_data { + struct acpi_thermal *tz; + u32 event; +}; + +struct acpi_thermal_bind_data { + struct thermal_zone_device *thermal; + struct thermal_cooling_device *cdev; + bool bind; +}; + +struct thermal_attr { + struct 
device_attribute attr; + char name[20]; +}; + +struct nvdimm; + +struct nd_mapping_desc { + struct nvdimm *nvdimm; + u64 start; + u64 size; + int position; +}; + +enum nfit_uuids { + NFIT_DEV_DIMM = 0, + NFIT_DEV_DIMM_N_HPE1 = 1, + NFIT_DEV_DIMM_N_HPE2 = 2, + NFIT_DEV_DIMM_N_MSFT = 3, + NFIT_DEV_DIMM_N_HYPERV = 4, + NFIT_BUS_INTEL = 6, + NFIT_SPA_VOLATILE = 7, + NFIT_SPA_PM = 8, + NFIT_SPA_DCR = 9, + NFIT_SPA_BDW = 10, + NFIT_SPA_VDISK = 11, + NFIT_SPA_VCD = 12, + NFIT_SPA_PDISK = 13, + NFIT_SPA_PCD = 14, + NFIT_DEV_BUS = 15, + NFIT_UUID_MAX = 16, +}; + +enum nvdimm_fwa_state { + NVDIMM_FWA_INVALID = 0, + NVDIMM_FWA_IDLE = 1, + NVDIMM_FWA_ARMED = 2, + NVDIMM_FWA_BUSY = 3, + NVDIMM_FWA_ARM_OVERFLOW = 4, +}; + +enum nvdimm_fwa_capability { + NVDIMM_FWA_CAP_INVALID = 0, + NVDIMM_FWA_CAP_NONE = 1, + NVDIMM_FWA_CAP_QUIESCE = 2, + NVDIMM_FWA_CAP_LIVE = 3, +}; + +enum nvdimm_fwa_result { + NVDIMM_FWA_RESULT_INVALID = 0, + NVDIMM_FWA_RESULT_NONE = 1, + NVDIMM_FWA_RESULT_SUCCESS = 2, + NVDIMM_FWA_RESULT_NOTSTAGED = 3, + NVDIMM_FWA_RESULT_NEEDRESET = 4, + NVDIMM_FWA_RESULT_FAIL = 5, +}; + +enum { + ND_CMD_IMPLEMENTED = 0, + ND_CMD_ARS_CAP = 1, + ND_CMD_ARS_START = 2, + ND_CMD_ARS_STATUS = 3, + ND_CMD_CLEAR_ERROR = 4, + ND_CMD_SMART = 1, + ND_CMD_SMART_THRESHOLD = 2, + ND_CMD_DIMM_FLAGS = 3, + ND_CMD_GET_CONFIG_SIZE = 4, + ND_CMD_GET_CONFIG_DATA = 5, + ND_CMD_SET_CONFIG_DATA = 6, + ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7, + ND_CMD_VENDOR_EFFECT_LOG = 8, + ND_CMD_VENDOR = 9, + ND_CMD_CALL = 10, +}; + +enum nfit_mem_flags { + NFIT_MEM_LSR = 0, + NFIT_MEM_LSW = 1, + NFIT_MEM_DIRTY = 2, + NFIT_MEM_DIRTY_COUNT = 3, +}; + +enum nfit_dimm_notifiers { + NFIT_NOTIFY_DIMM_HEALTH = 129, +}; + +enum nfit_ars_state { + ARS_REQ_SHORT = 0, + ARS_REQ_LONG = 1, + ARS_FAILED = 2, +}; + +enum scrub_flags { + ARS_BUSY = 0, + ARS_CANCEL = 1, + ARS_VALID = 2, + ARS_POLL = 3, +}; + +enum nfit_root_notifiers { + NFIT_NOTIFY_UPDATE = 128, + NFIT_NOTIFY_UC_MEMORY_ERROR = 129, +}; + +enum nvdimm_family_cmds { + NVDIMM_INTEL_LATCH_SHUTDOWN = 10, + NVDIMM_INTEL_GET_MODES = 11, + NVDIMM_INTEL_GET_FWINFO = 12, + NVDIMM_INTEL_START_FWUPDATE = 13, + NVDIMM_INTEL_SEND_FWUPDATE = 14, + NVDIMM_INTEL_FINISH_FWUPDATE = 15, + NVDIMM_INTEL_QUERY_FWUPDATE = 16, + NVDIMM_INTEL_SET_THRESHOLD = 17, + NVDIMM_INTEL_INJECT_ERROR = 18, + NVDIMM_INTEL_GET_SECURITY_STATE = 19, + NVDIMM_INTEL_SET_PASSPHRASE = 20, + NVDIMM_INTEL_DISABLE_PASSPHRASE = 21, + NVDIMM_INTEL_UNLOCK_UNIT = 22, + NVDIMM_INTEL_FREEZE_LOCK = 23, + NVDIMM_INTEL_SECURE_ERASE = 24, + NVDIMM_INTEL_OVERWRITE = 25, + NVDIMM_INTEL_QUERY_OVERWRITE = 26, + NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27, + NVDIMM_INTEL_MASTER_SECURE_ERASE = 28, + NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29, + NVDIMM_INTEL_FW_ACTIVATE_ARM = 30, +}; + +enum { + NFIT_BLK_READ_FLUSH = 1, + NFIT_BLK_DCR_LATCH = 2, + NFIT_ARS_STATUS_DONE = 0, + NFIT_ARS_STATUS_BUSY = 65536, + NFIT_ARS_STATUS_NONE = 131072, + NFIT_ARS_STATUS_INTR = 196608, + NFIT_ARS_START_BUSY = 6, + NFIT_ARS_CAP_NONE = 1, + NFIT_ARS_F_OVERFLOW = 1, + NFIT_ARS_TIMEOUT = 90, +}; + +enum { + ND_ARS_VOLATILE = 1, + ND_ARS_PERSISTENT = 2, + ND_ARS_RETURN_PREV_DATA = 2, + ND_CONFIG_LOCKED = 1, +}; + +enum nfit_aux_cmds { + NFIT_CMD_TRANSLATE_SPA = 5, + NFIT_CMD_ARS_INJECT_SET = 7, + NFIT_CMD_ARS_INJECT_CLEAR = 8, + NFIT_CMD_ARS_INJECT_GET = 9, +}; + +enum nvdimm_bus_family_cmds { + NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1, + NVDIMM_BUS_INTEL_FW_ACTIVATE = 2, +}; + +enum acpi_nfit_type { + ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0, + ACPI_NFIT_TYPE_MEMORY_MAP = 1, + 
ACPI_NFIT_TYPE_INTERLEAVE = 2, + ACPI_NFIT_TYPE_SMBIOS = 3, + ACPI_NFIT_TYPE_CONTROL_REGION = 4, + ACPI_NFIT_TYPE_DATA_REGION = 5, + ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, + ACPI_NFIT_TYPE_CAPABILITIES = 7, + ACPI_NFIT_TYPE_RESERVED = 8, +}; + +enum { + NDD_UNARMED = 1, + NDD_LOCKED = 2, + NDD_SECURITY_OVERWRITE = 3, + NDD_WORK_PENDING = 4, + NDD_LABELING = 6, + NDD_INCOHERENT = 7, + NDD_REGISTER_SYNC = 8, + ND_IOCTL_MAX_BUFLEN = 4194304, + ND_CMD_MAX_ELEM = 5, + ND_CMD_MAX_ENVELOPE = 256, + ND_MAX_MAPPINGS = 32, + ND_REGION_PAGEMAP = 0, + ND_REGION_PERSIST_CACHE = 1, + ND_REGION_PERSIST_MEMCTRL = 2, + ND_REGION_ASYNC = 3, + ND_REGION_CXL = 4, + DPA_RESOURCE_ADJUSTED = 1, +}; + +enum nvdimm_passphrase_type { + NVDIMM_USER = 0, + NVDIMM_MASTER = 1, +}; + +enum nvdimm_fwa_trigger { + NVDIMM_FWA_ARM = 0, + NVDIMM_FWA_DISARM = 1, +}; + +enum nvdimm_event { + NVDIMM_REVALIDATE_POISON = 0, + NVDIMM_REVALIDATE_REGION = 1, +}; + +enum scrub_mode { + HW_ERROR_SCRUB_OFF = 0, + HW_ERROR_SCRUB_ON = 1, +}; + +struct nvdimm_bus_descriptor; + +typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, + void *, unsigned int, int *); + +struct nvdimm_bus_fw_ops; + +struct nvdimm_bus_descriptor { + const struct attribute_group **attr_groups; + unsigned long cmd_mask; + unsigned long dimm_family_mask; + unsigned long bus_family_mask; + struct module *module; + char *provider_name; + struct device_node *of_node; + ndctl_fn ndctl; + int (*flush_probe)(struct nvdimm_bus_descriptor *); + int (*clear_to_send)(struct nvdimm_bus_descriptor *, struct nvdimm *, + unsigned int, void *); + const struct nvdimm_bus_fw_ops *fw_ops; +}; + +struct nvdimm_bus; + +struct nd_cmd_ars_status; + +struct nfit_spa; + +struct acpi_nfit_desc { + struct nvdimm_bus_descriptor nd_desc; + struct acpi_table_header acpi_header; + struct mutex init_mutex; + struct list_head memdevs; + struct list_head flushes; + struct list_head dimms; + struct list_head spas; + struct list_head dcrs; + struct list_head bdws; + struct list_head idts; + struct nvdimm_bus *nvdimm_bus; + struct device *dev; + struct nd_cmd_ars_status *ars_status; + struct nfit_spa *scrub_spa; + struct delayed_work dwork; + struct list_head list; + struct kernfs_node *scrub_count_state; + unsigned int max_ars; + unsigned int scrub_count; + unsigned int scrub_mode; + unsigned long scrub_flags; + unsigned long dimm_cmd_force_en; + unsigned long bus_cmd_force_en; + unsigned long bus_dsm_mask; + unsigned long family_dsm_mask[2]; + unsigned int platform_cap; + unsigned int scrub_tmo; + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_capability fwa_cap; + int fwa_count; + bool fwa_noidle; + bool fwa_nosuspend; +}; + +struct nvdimm_bus_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct nvdimm_bus_descriptor *); + enum nvdimm_fwa_capability (*capability)(struct nvdimm_bus_descriptor *); + int (*activate)(struct nvdimm_bus_descriptor *); +}; + +struct nd_ars_record { + __u32 handle; + __u32 reserved; + __u64 err_address; + __u64 length; +}; + +struct nd_cmd_ars_status { + __u32 status; + __u32 out_length; + __u64 address; + __u64 length; + __u64 restart_address; + __u64 restart_length; + __u16 type; + __u16 flags; + __u32 num_records; + struct nd_ars_record records[0]; +}; + +struct acpi_nfit_header { + u16 type; + u16 length; +}; + +struct acpi_nfit_system_address { + struct acpi_nfit_header header; + u16 range_index; + u16 flags; + u32 reserved; + u32 proximity_domain; + u8 range_guid[16]; + u64 address; + u64 length; + u64 memory_mapping; + u64 
location_cookie; +}; + +struct nd_region; + +struct nfit_spa { + struct list_head list; + struct nd_region *nd_region; + unsigned long ars_state; + u32 clear_err_unit; + u32 max_ars; + struct acpi_nfit_system_address spa[0]; +}; + +struct acpi_nfit_memory_map; + +struct acpi_nfit_control_region; + +struct acpi_nfit_interleave; + +struct nfit_flush; + +struct nfit_mem { + struct nvdimm *nvdimm; + struct acpi_nfit_memory_map *memdev_dcr; + struct acpi_nfit_memory_map *memdev_pmem; + struct acpi_nfit_control_region *dcr; + struct acpi_nfit_system_address *spa_dcr; + struct acpi_nfit_interleave *idt_dcr; + struct kernfs_node *flags_attr; + struct nfit_flush *nfit_flush; + struct list_head list; + struct acpi_device *adev; + struct acpi_nfit_desc *acpi_desc; + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_result fwa_result; + int fwa_count; + char id[23]; + struct resource *flush_wpq; + unsigned long dsm_mask; + unsigned long flags; + u32 dirty_shutdown; + int family; +}; + +struct acpi_nfit_memory_map { + struct acpi_nfit_header header; + u32 device_handle; + u16 physical_id; + u16 region_id; + u16 range_index; + u16 region_index; + u64 region_size; + u64 region_offset; + u64 address; + u16 interleave_index; + u16 interleave_ways; + u16 flags; + u16 reserved; +}; + +struct acpi_nfit_control_region { + struct acpi_nfit_header header; + u16 region_index; + u16 vendor_id; + u16 device_id; + u16 revision_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 subsystem_revision_id; + u8 valid_fields; + u8 manufacturing_location; + u16 manufacturing_date; + u8 reserved[2]; + u32 serial_number; + u16 code; + u16 windows; + u64 window_size; + u64 command_offset; + u64 command_size; + u64 status_offset; + u64 status_size; + u16 flags; + u8 reserved1[6]; +}; + +struct acpi_nfit_interleave { + struct acpi_nfit_header header; + u16 interleave_index; + u16 reserved; + u32 line_count; + u32 line_size; + u32 line_offset[0]; +}; + +struct acpi_nfit_flush_address { + struct acpi_nfit_header header; + u32 device_handle; + u16 hint_count; + u8 reserved[6]; + u64 hint_address[0]; +}; + +struct nfit_flush { + struct list_head list; + struct acpi_nfit_flush_address flush[0]; +}; + +struct nfit_memdev { + struct list_head list; + struct acpi_nfit_memory_map memdev[0]; +}; + +struct nfit_dcr { + struct list_head list; + struct acpi_nfit_control_region dcr[0]; +}; + +struct acpi_nfit_data_region { + struct acpi_nfit_header header; + u16 region_index; + u16 windows; + u64 offset; + u64 size; + u64 capacity; + u64 start_address; +}; + +struct nfit_bdw { + struct list_head list; + struct acpi_nfit_data_region bdw[0]; +}; + +struct nfit_idt { + struct list_head list; + struct acpi_nfit_interleave idt[0]; +}; + +struct nd_cmd_pkg { + __u64 nd_family; + __u64 nd_command; + __u32 nd_size_in; + __u32 nd_size_out; + __u32 nd_reserved2[9]; + __u32 nd_fw_size; + unsigned char nd_payload[0]; +}; + +struct nd_cmd_desc { + int in_num; + int out_num; + u32 in_sizes[5]; + int out_sizes[5]; +}; + +struct nd_cmd_clear_error { + __u64 address; + __u64 length; + __u32 status; + __u8 reserved[4]; + __u64 cleared; +}; + +struct nfit_table_prev { + struct list_head spas; + struct list_head memdevs; + struct list_head dcrs; + struct list_head bdws; + struct list_head idts; + struct list_head flushes; +}; + +struct acpi_nfit_capabilities { + struct acpi_nfit_header header; + u8 highest_capability; + u8 reserved[3]; + u32 capabilities; + u32 reserved2; +}; + +struct nvdimm_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct 
nvdimm *); + enum nvdimm_fwa_result (*activate_result)(struct nvdimm *); + int (*arm)(struct nvdimm *, enum nvdimm_fwa_trigger); +}; + +struct nvdimm_key_data; + +struct nvdimm_security_ops { + unsigned long (*get_flags)(struct nvdimm *, enum nvdimm_passphrase_type); + int (*freeze)(struct nvdimm *); + int (*change_key)(struct nvdimm *, const struct nvdimm_key_data *, + const struct nvdimm_key_data *, enum nvdimm_passphrase_type); + int (*unlock)(struct nvdimm *, const struct nvdimm_key_data *); + int (*disable)(struct nvdimm *, const struct nvdimm_key_data *); + int (*erase)(struct nvdimm *, const struct nvdimm_key_data *, + enum nvdimm_passphrase_type); + int (*overwrite)(struct nvdimm *, const struct nvdimm_key_data *); + int (*query_overwrite)(struct nvdimm *); + int (*disable_master)(struct nvdimm *, const struct nvdimm_key_data *); +}; + +struct nvdimm_key_data { + u8 data[32]; +}; + +struct nd_cmd_ars_cap { + __u64 address; + __u64 length; + __u32 status; + __u32 max_ars_out; + __u32 clear_err_unit; + __u16 flags; + __u16 reserved; +}; + +struct nd_cmd_ars_start { + __u64 address; + __u64 length; + __u16 type; + __u8 flags; + __u8 reserved[5]; + __u32 status; + __u32 scrub_time; +}; + +struct nd_interleave_set; + +struct nd_region_desc { + struct resource *res; + struct nd_mapping_desc *mapping; + u16 num_mappings; + const struct attribute_group **attr_groups; + struct nd_interleave_set *nd_set; + void *provider_data; + int num_lanes; + int numa_node; + int target_node; + unsigned long flags; + int memregion; + struct device_node *of_node; + int (*flush)(struct nd_region *, struct bio *); +}; + +struct nd_interleave_set { + u64 cookie1; + u64 cookie2; + u64 altcookie; + guid_t type_guid; +}; + +struct nfit_set_info2 { + u64 region_offset; + u32 serial_number; + u16 vendor_id; + u16 manufacturing_date; + u8 manufacturing_location; + u8 reserved[31]; +}; + +struct nfit_set_info { + u64 region_offset; + u32 serial_number; + u32 pad; +}; + +struct nd_cmd_get_config_data_hdr { + __u32 in_offset; + __u32 in_length; + __u32 status; + __u8 out_buf[0]; +}; + +struct nd_cmd_set_config_hdr { + __u32 in_offset; + __u32 in_length; + __u8 in_buf[0]; +}; + +struct nd_intel_smart { + u32 status; + union { + struct { + u32 flags; + u8 reserved0[4]; + u8 health; + u8 spares; + u8 life_used; + u8 alarm_flags; + u16 media_temperature; + u16 ctrl_temperature; + u32 shutdown_count; + u8 ait_status; + u16 pmic_temperature; + u8 reserved1[8]; + u8 shutdown_state; + u32 vendor_size; + u8 vendor_data[92]; + } __attribute__((packed)); + u8 data[128]; + }; +}; + +struct nvdimm { + unsigned long flags; + void *provider_data; + unsigned long cmd_mask; + struct device dev; + atomic_t busy; + int id; + int num_flush; + struct resource *flush_wpq; + const char *dimm_id; + struct { + const struct nvdimm_security_ops *ops; + unsigned long flags; + unsigned long ext_flags; + unsigned int overwrite_tmo; + struct kernfs_node *overwrite_state; + } sec; + struct delayed_work dwork; + const struct nvdimm_fw_ops *fw_ops; +}; + +struct badrange { + struct list_head list; + spinlock_t lock; +}; + +struct nvdimm_bus { + struct nvdimm_bus_descriptor *nd_desc; + wait_queue_head_t wait; + struct list_head list; + struct device dev; + int id; + int probe_active; + atomic_t ioctl_active; + struct list_head mapping_list; + struct mutex reconfig_mutex; + struct badrange badrange; +}; + +struct nvdimm_drvdata; + +struct nd_mapping { + struct nvdimm *nvdimm; + u64 start; + u64 size; + int position; + struct list_head labels; + struct 
mutex lock; + struct nvdimm_drvdata *ndd; +}; + +struct nd_percpu_lane; + +struct nd_region { + struct device dev; + struct ida ns_ida; + struct ida btt_ida; + struct ida pfn_ida; + struct ida dax_ida; + unsigned long flags; + struct device *ns_seed; + struct device *btt_seed; + struct device *pfn_seed; + struct device *dax_seed; + unsigned long align; + u16 ndr_mappings; + u64 ndr_size; + u64 ndr_start; + int id; + int num_lanes; + int ro; + int numa_node; + int target_node; + void *provider_data; + struct kernfs_node *bb_state; + struct badblocks bb; + struct nd_interleave_set *nd_set; + struct nd_percpu_lane __attribute__((btf_type_tag("percpu"))) * lane; + int (*flush)(struct nd_region *, struct bio *); + struct nd_mapping mapping[0]; +}; + +struct nd_percpu_lane { + int count; + spinlock_t lock; +}; + +struct nd_cmd_get_config_size { + __u32 status; + __u32 config_size; + __u32 max_xfer; +}; + +struct nvdimm_drvdata { + struct device *dev; + int nslabel_size; + struct nd_cmd_get_config_size nsarea; + void *data; + bool cxl; + int ns_current; + int ns_next; + struct resource dpa; + struct kref kref; +}; + +struct nd_intel_set_passphrase { + u8 old_pass[32]; + u8 new_pass[32]; + u32 status; +}; + +struct nd_intel_unlock_unit { + u8 passphrase[32]; + u32 status; +}; + +struct nd_intel_disable_passphrase { + u8 passphrase[32]; + u32 status; +}; + +struct nd_intel_secure_erase { + u8 passphrase[32]; + u32 status; +}; + +struct nd_intel_overwrite { + u8 passphrase[32]; + u32 status; +}; + +struct nd_intel_bus_fw_activate { + u8 iodev_state; + u32 status; +} __attribute__((packed)); + +struct nd_intel_fw_activate_arm { + u8 activate_arm; + u32 status; +} __attribute__((packed)); + +enum nvdimm_security_bits { + NVDIMM_SECURITY_DISABLED = 0, + NVDIMM_SECURITY_UNLOCKED = 1, + NVDIMM_SECURITY_LOCKED = 2, + NVDIMM_SECURITY_FROZEN = 3, + NVDIMM_SECURITY_OVERWRITE = 4, +}; + +struct nd_intel_get_security_state { + u32 status; + u8 extended_state; + u8 reserved[3]; + u8 state; + u8 reserved1[3]; +}; + +struct nd_intel_freeze_lock { + u32 status; +}; + +struct nd_intel_query_overwrite { + u32 status; +}; + +struct nd_intel_bus_fw_activate_businfo { + u32 status; + u16 reserved; + u8 state; + u8 capability; + u64 activate_tmo; + u64 cpu_quiesce_tmo; + u64 io_quiesce_tmo; + u64 max_quiesce_tmo; +}; + +struct nd_intel_fw_activate_dimminfo { + u32 status; + u16 result; + u8 state; + u8 reserved[7]; +} __attribute__((packed)); + +enum acpi_srat_type { + ACPI_SRAT_TYPE_CPU_AFFINITY = 0, + ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, + ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, + ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, + ACPI_SRAT_TYPE_RESERVED = 7, +}; + +struct acpi_table_srat { + struct acpi_table_header header; + u32 table_revision; + u64 reserved; +}; + +struct acpi_srat_mem_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; + u64 base_address; + u64 length; + u32 reserved1; + u32 flags; + u64 reserved2; +} __attribute__((packed)); + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +} __attribute__((packed)); + +struct acpi_srat_generic_affinity { + struct acpi_subtable_header header; + u8 reserved; + u8 device_handle_type; + u32 proximity_domain; + u8 device_handle[16]; + u32 flags; + u32 reserved1; +}; + +struct acpi_table_slit { + struct acpi_table_header 
header; + u64 locality_count; + u8 entry[0]; +} __attribute__((packed)); + +struct acpi_cedt_cfmws { + struct acpi_cedt_header header; + u32 reserved1; + u64 base_hpa; + u64 window_size; + u8 interleave_ways; + u8 interleave_arithmetic; + u16 reserved2; + u32 granularity; + u16 restrictions; + u16 qtg_id; + u32 interleave_targets[0]; +} __attribute__((packed)); + +struct acpi_hmat_locality; + +struct memory_locality { + struct list_head node; + struct acpi_hmat_locality *hmat_loc; +}; + +struct acpi_hmat_locality { + struct acpi_hmat_structure header; + u8 flags; + u8 data_type; + u8 min_transfer_size; + u8 reserved1; + u32 number_of_initiator_Pds; + u32 number_of_target_Pds; + u32 reserved2; + u64 entry_base_unit; +}; + +enum acpi_hmat_type { + ACPI_HMAT_TYPE_PROXIMITY = 0, + ACPI_HMAT_TYPE_LOCALITY = 1, + ACPI_HMAT_TYPE_CACHE = 2, + ACPI_HMAT_TYPE_RESERVED = 3, +}; + +enum cache_indexing { + NODE_CACHE_DIRECT_MAP = 0, + NODE_CACHE_INDEXED = 1, + NODE_CACHE_OTHER = 2, +}; + +enum cache_write_policy { + NODE_CACHE_WRITE_BACK = 0, + NODE_CACHE_WRITE_THROUGH = 1, + NODE_CACHE_WRITE_OTHER = 2, +}; + +enum locality_types { + WRITE_LATENCY = 0, + READ_LATENCY = 1, + WRITE_BANDWIDTH = 2, + READ_BANDWIDTH = 3, +}; + +struct node_cache_attrs { + enum cache_indexing indexing; + enum cache_write_policy write_policy; + u64 size; + u16 line_size; + u8 level; +}; + +struct memory_target { + struct list_head node; + unsigned int memory_pxm; + unsigned int processor_pxm; + struct resource memregions; + struct node_hmem_attrs hmem_attrs[2]; + struct list_head caches; + struct node_cache_attrs cache_attrs; + bool registered; +}; + +struct memory_initiator { + struct list_head node; + unsigned int processor_pxm; + bool has_cpu; +}; + +struct target_cache { + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +struct acpi_hmat_proximity_domain { + struct acpi_hmat_structure header; + u16 flags; + u16 reserved1; + u32 processor_PD; + u32 memory_PD; + u32 reserved2; + u64 reserved3; + u64 reserved4; +}; + +struct acpi_hmat_cache { + struct acpi_hmat_structure header; + u32 memory_PD; + u32 reserved1; + u64 cache_size; + u32 cache_attributes; + u16 reserved2; + u16 number_of_SMBIOShandles; +}; + +struct acpi_memory_info { + struct list_head list; + u64 start_addr; + u64 length; + unsigned short caching; + unsigned short write_protect; + unsigned int enabled : 1; +}; + +struct acpi_memory_device { + struct acpi_device *device; + struct list_head res_list; + int mgid; +}; + +struct acpi_pci_ioapic { + acpi_handle root_handle; + acpi_handle handle; + u32 gsi_base; + struct resource res; + struct pci_dev *pdev; + struct list_head list; +}; + +struct acpi_offsets { + size_t offset; + u8 mode; +}; + +enum { + ACPI_BATTERY_ALARM_PRESENT = 0, + ACPI_BATTERY_XINFO_PRESENT = 1, + ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY = 2, + ACPI_BATTERY_QUIRK_THINKPAD_MAH = 3, + ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE = 4, +}; + +enum dmi_entry_type { + DMI_ENTRY_BIOS = 0, + DMI_ENTRY_SYSTEM = 1, + DMI_ENTRY_BASEBOARD = 2, + DMI_ENTRY_CHASSIS = 3, + DMI_ENTRY_PROCESSOR = 4, + DMI_ENTRY_MEM_CONTROLLER = 5, + DMI_ENTRY_MEM_MODULE = 6, + DMI_ENTRY_CACHE = 7, + DMI_ENTRY_PORT_CONNECTOR = 8, + DMI_ENTRY_SYSTEM_SLOT = 9, + DMI_ENTRY_ONBOARD_DEVICE = 10, + DMI_ENTRY_OEMSTRINGS = 11, + DMI_ENTRY_SYSCONF = 12, + DMI_ENTRY_BIOS_LANG = 13, + DMI_ENTRY_GROUP_ASSOC = 14, + DMI_ENTRY_SYSTEM_EVENT_LOG = 15, + DMI_ENTRY_PHYS_MEM_ARRAY = 16, + DMI_ENTRY_MEM_DEVICE = 17, + DMI_ENTRY_32_MEM_ERROR = 18, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 
19, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20, + DMI_ENTRY_BUILTIN_POINTING_DEV = 21, + DMI_ENTRY_PORTABLE_BATTERY = 22, + DMI_ENTRY_SYSTEM_RESET = 23, + DMI_ENTRY_HW_SECURITY = 24, + DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25, + DMI_ENTRY_VOLTAGE_PROBE = 26, + DMI_ENTRY_COOLING_DEV = 27, + DMI_ENTRY_TEMP_PROBE = 28, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29, + DMI_ENTRY_OOB_REMOTE_ACCESS = 30, + DMI_ENTRY_BIS_ENTRY = 31, + DMI_ENTRY_SYSTEM_BOOT = 32, + DMI_ENTRY_MGMT_DEV = 33, + DMI_ENTRY_MGMT_DEV_COMPONENT = 34, + DMI_ENTRY_MGMT_DEV_THRES = 35, + DMI_ENTRY_MEM_CHANNEL = 36, + DMI_ENTRY_IPMI_DEV = 37, + DMI_ENTRY_SYS_POWER_SUPPLY = 38, + DMI_ENTRY_ADDITIONAL = 39, + DMI_ENTRY_ONBOARD_DEV_EXT = 40, + DMI_ENTRY_MGMT_CONTROLLER_HOST = 41, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +struct acpi_battery { + struct mutex lock; + struct mutex sysfs_lock; + struct power_supply *bat; + struct power_supply_desc bat_desc; + struct acpi_device *device; + struct notifier_block pm_nb; + struct list_head list; + unsigned long update_time; + int revision; + int rate_now; + int capacity_now; + int voltage_now; + int design_capacity; + int full_charge_capacity; + int technology; + int design_voltage; + int design_capacity_warning; + int design_capacity_low; + int cycle_count; + int measurement_accuracy; + int max_sampling_time; + int min_sampling_time; + int max_averaging_interval; + int min_averaging_interval; + int capacity_granularity_1; + int capacity_granularity_2; + int alarm; + char model_number[64]; + char serial_number[64]; + char type[64]; + char oem_info[64]; + int state; + int power_unit; + unsigned long flags; +}; + +struct acpi_battery_hook { + const char *name; + int (*add_battery)(struct power_supply *, struct acpi_battery_hook *); + int (*remove_battery)(struct power_supply *, struct acpi_battery_hook *); + struct list_head list; +}; + +struct dmi_header { + u8 type; + u8 length; + u16 handle; +}; + +struct cppc_pcc_data { + struct pcc_mbox_chan *pcc_channel; + void *pcc_comm_addr; + bool pcc_channel_acquired; + unsigned int deadline_us; + unsigned int pcc_mpar; + unsigned int pcc_mrtt; + unsigned int pcc_nominal; + bool pending_pcc_write_cmd; + bool platform_owns_pcc; + unsigned int pcc_write_cnt; + struct rw_semaphore pcc_lock; + wait_queue_head_t pcc_write_wait_q; + ktime_t last_cmd_cmpl_time; + ktime_t last_mpar_reset; + int mpar_count; + int refcount; +}; + +struct cpc_register_resource { + acpi_object_type type; + u64 *sys_mem_vaddr; + union { + struct cpc_reg reg; + u64 int_value; + } cpc_entry; +}; + +struct cpc_desc { + int num_entries; + int version; + int cpu_id; + int write_cmd_status; + int write_cmd_id; + struct cpc_register_resource cpc_regs[21]; + struct acpi_psd_package domain_info; + struct kobject kobj; +}; + +enum cppc_regs { + HIGHEST_PERF = 0, 
+ NOMINAL_PERF = 1, + LOW_NON_LINEAR_PERF = 2, + LOWEST_PERF = 3, + GUARANTEED_PERF = 4, + DESIRED_PERF = 5, + MIN_PERF = 6, + MAX_PERF = 7, + PERF_REDUC_TOLERANCE = 8, + TIME_WINDOW = 9, + CTR_WRAP_TIME = 10, + REFERENCE_CTR = 11, + DELIVERED_CTR = 12, + PERF_LIMITED = 13, + ENABLE = 14, + AUTO_SEL_ENABLE = 15, + AUTO_ACT_WINDOW = 16, + ENERGY_PERF = 17, + REFERENCE_PERF = 18, + LOWEST_FREQ = 19, + NOMINAL_FREQ = 20, +}; + +struct cppc_perf_ctrls { + u32 max_perf; + u32 min_perf; + u32 desired_perf; + u32 energy_perf; +}; + +struct cppc_perf_fb_ctrs { + u64 reference; + u64 delivered; + u64 reference_perf; + u64 wraparound_time; +}; + +struct cppc_cpudata { + struct list_head node; + struct cppc_perf_caps perf_caps; + struct cppc_perf_ctrls perf_ctrls; + struct cppc_perf_fb_ctrs perf_fb_ctrs; + unsigned int shared_type; + cpumask_var_t shared_cpu_map; +}; + +struct acpi_pcct_shared_memory { + u32 signature; + u16 command; + u16 status; +}; + +struct apei_resources { + struct list_head iomem; + struct list_head ioport; +}; + +struct apei_res { + struct list_head list; + unsigned long start; + unsigned long end; +}; + +struct apei_exec_ins_type; + +struct acpi_whea_header; + +struct apei_exec_context { + u32 ip; + u64 value; + u64 var1; + u64 var2; + u64 src_base; + u64 dst_base; + struct apei_exec_ins_type *ins_table; + u32 instructions; + struct acpi_whea_header *action_table; + u32 entries; +}; + +typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *, struct acpi_whea_header *); + +struct apei_exec_ins_type { + u32 flags; + apei_exec_ins_func_t run; +}; + +struct acpi_whea_header { + u8 action; + u8 instruction; + u8 flags; + u8 reserved; + struct acpi_generic_address register_region; + u64 value; + u64 mask; +}; + +typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *, + struct acpi_whea_header *, void *); + +struct acpi_table_hest { + struct acpi_table_header header; + u32 error_source_count; +}; + +enum hest_status { + HEST_ENABLED = 0, + HEST_DISABLED = 1, + HEST_NOT_FOUND = 2, +}; + +enum acpi_hest_types { + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, + ACPI_HEST_TYPE_AER_ROOT_PORT = 6, + ACPI_HEST_TYPE_AER_ENDPOINT = 7, + ACPI_HEST_TYPE_AER_BRIDGE = 8, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, + ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, + ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, + ACPI_HEST_TYPE_RESERVED = 12, +}; + +struct acpi_hest_ia_machine_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u64 global_capability_data; + u64 global_control_data; + u8 num_hardware_banks; + u8 reserved3[7]; +}; + +struct acpi_hest_ia_deferred_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +struct acpi_hest_generic { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; +}; + +typedef int (*apei_hest_func_t)(struct acpi_hest_header *, void *); + +struct ghes_arr { + struct platform_device **ghes_devs; + unsigned int count; +}; + 
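+/* Editorial note (not generated): the APEI/HEST definitions above describe ACPI hardware error sources; the ERST structures that follow cover the Error Record Serialization Table used to persist error records. */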
+struct erst_erange { + u64 base; + u64 size; + void *vaddr; + u32 attr; + u64 timings; +}; + +struct acpi_table_erst { + struct acpi_table_header header; + u32 header_length; + u32 reserved; + u32 entries; +}; + +struct erst_record_id_cache { + struct mutex lock; + u64 *entries; + int len; + int size; + int refcount; +}; + +enum acpi_erst_actions { + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, + ACPI_ERST_SET_RECORD_OFFSET = 4, + ACPI_ERST_EXECUTE_OPERATION = 5, + ACPI_ERST_CHECK_BUSY_STATUS = 6, + ACPI_ERST_GET_COMMAND_STATUS = 7, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, + ACPI_ERST_GET_RECORD_COUNT = 10, + ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, + ACPI_ERST_NOT_USED = 12, + ACPI_ERST_GET_ERROR_RANGE = 13, + ACPI_ERST_GET_ERROR_LENGTH = 14, + ACPI_ERST_GET_ERROR_ATTRIBUTES = 15, + ACPI_ERST_EXECUTE_TIMINGS = 16, + ACPI_ERST_ACTION_RESERVED = 17, +}; + +struct cper_pstore_record { + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + char data[0]; +}; + +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +struct acpi_bert_region { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +struct ghes_estatus_cache { + u32 estatus_len; + atomic_t count; + struct acpi_hest_generic *generic; + unsigned long long time_in; + struct callback_head rcu; +}; + +enum acpi_hest_notify_types { + ACPI_HEST_NOTIFY_POLLED = 0, + ACPI_HEST_NOTIFY_EXTERNAL = 1, + ACPI_HEST_NOTIFY_LOCAL = 2, + ACPI_HEST_NOTIFY_SCI = 3, + ACPI_HEST_NOTIFY_NMI = 4, + ACPI_HEST_NOTIFY_CMCI = 5, + ACPI_HEST_NOTIFY_MCE = 6, + ACPI_HEST_NOTIFY_GPIO = 7, + ACPI_HEST_NOTIFY_SEA = 8, + ACPI_HEST_NOTIFY_SEI = 9, + ACPI_HEST_NOTIFY_GSIV = 10, + ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, + ACPI_HEST_NOTIFY_RESERVED = 12, +}; + +struct ghes; + +struct ghes_estatus_node { + struct llist_node llnode; + struct acpi_hest_generic *generic; + struct ghes *ghes; + int task_work_cpu; + struct callback_head task_work; +}; + +struct acpi_hest_generic_v2; + +struct ghes { + union { + struct acpi_hest_generic *generic; + struct acpi_hest_generic_v2 *generic_v2; + }; + struct acpi_hest_generic_status *estatus; + unsigned long flags; + union { + struct list_head list; + struct timer_list timer; + unsigned int irq; + }; + struct device *dev; + struct list_head elist; +}; + +struct acpi_hest_generic_v2 { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; + struct acpi_generic_address read_ack_register; + u64 read_ack_preserve; + u64 read_ack_write; +} __attribute__((packed)); + +struct acpi_hest_generic_data { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; +}; + +struct acpi_hest_generic_data_v300 { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; + u64 time_stamp; +}; + +struct cper_arm_err_info { + u8 version; + u8 length; + u16 validation_bits; + u8 type; + u16 multiple_error; + u8 flags; + u64 error_info; + u64 virt_fault_addr; + u64 physical_fault_addr; +} 
__attribute__((packed)); + +struct ghes_vendor_record_entry { + struct work_struct work; + int error_severity; + char vendor_record[0]; +}; + +struct cper_sec_pcie { + u64 validation_bits; + u32 port_type; + struct { + u8 minor; + u8 major; + u8 reserved[2]; + } version; + u16 command; + u16 status; + u32 reserved; + struct { + u16 vendor_id; + u16 device_id; + u8 class_code[3]; + u8 function; + u8 device; + u16 segment; + u8 bus; + u8 secondary_bus; + u16 slot; + u8 reserved; + } __attribute__((packed)) device_id; + struct { + u32 lower; + u32 upper; + } serial_number; + struct { + u16 secondary_status; + u16 control; + } bridge; + u8 capability[60]; + u8 aer_info[96]; +}; + +struct cper_sec_proc_arm { + u32 validation_bits; + u16 err_info_num; + u16 context_info_num; + u32 section_length; + u8 affinity_level; + u8 reserved[3]; + u64 mpidr; + u64 midr; + u32 running_state; + u32 psci_state; +}; + +struct intel_pmic_regs_handler_ctx { + unsigned int val; + u16 addr; +}; + +struct regmap; + +struct intel_pmic_opregion_data; + +struct intel_pmic_opregion { + struct mutex lock; + struct acpi_lpat_conversion_table *lpat_table; + struct regmap *regmap; + const struct intel_pmic_opregion_data *data; + struct intel_pmic_regs_handler_ctx ctx; +}; + +struct pmic_table; + +struct intel_pmic_opregion_data { + int (*get_power)(struct regmap *, int, int, u64 *); + int (*update_power)(struct regmap *, int, int, bool); + int (*get_raw_temp)(struct regmap *, int); + int (*update_aux)(struct regmap *, int, int); + int (*get_policy)(struct regmap *, int, int, u64 *); + int (*update_policy)(struct regmap *, int, int, int); + int (*exec_mipi_pmic_seq_element)(struct regmap *, u16, u32, u32, u32); + int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *, int); + struct pmic_table *power_table; + int power_table_count; + struct pmic_table *thermal_table; + int thermal_table_count; + int pmic_i2c_address; +}; + +struct pmic_table { + int address; + int reg; + int bit; +}; + +struct pnp_resource { + struct list_head list; + struct resource res; +}; + +struct pnp_port { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t size; + unsigned char flags; +}; + +typedef struct { + unsigned long bits[4]; +} pnp_irq_mask_t; + +struct pnp_irq { + pnp_irq_mask_t map; + unsigned char flags; +}; + +struct pnp_dma { + unsigned char map; + unsigned char flags; +}; + +struct pnp_mem { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t size; + unsigned char flags; +}; + +struct pnp_option { + struct list_head list; + unsigned int flags; + unsigned long type; + union { + struct pnp_port port; + struct pnp_irq irq; + struct pnp_dma dma; + struct pnp_mem mem; + } u; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options : 30; + unsigned int mc_flags : 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot : 2; + __u8 keeplocked : 1; + __u8 reserved : 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + unsigned short mmc3_profile; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; + bool opened_for_data; + __s64 last_media_change_ms; +}; + +struct cdrom_multisession; + +struct cdrom_mcn; + +struct packet_command; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void 
(*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *, + void __attribute__((btf_type_tag("user"))) *, u32, u32, u8 *); + const int capability; +}; + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct scsi_sense_hdr; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +struct pnp_info_buffer { + char *buffer; + char *curr; + unsigned long size; + unsigned long len; + int stop; + int error; +}; + +typedef struct pnp_info_buffer pnp_info_buffer_t; + +struct pnp_fixup { + char id[7]; + void (*quirk_function)(struct pnp_dev *); +}; + +struct acpipnp_parse_option_s { + struct pnp_dev *dev; + unsigned int option_flags; +}; + +struct dma_chan___2; + +struct dma_chan_tbl_ent { + struct dma_chan___2 *chan; +}; + +typedef s32 dma_cookie_t; + +struct dma_device; + +struct dma_chan_dev; + +struct dma_chan_percpu; + +struct dma_router; + +struct dma_chan___2 { + struct dma_device *device; + struct device *slave; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu __attribute__((btf_type_tag("percpu"))) * local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan___2 *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +typedef struct { + unsigned long bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + 
DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +struct dma_async_tx_descriptor; + +struct dma_interleaved_template; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + unsigned short max_xor; + unsigned short max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan___2 *); + int (*device_router_config)(struct dma_chan___2 *); + void (*device_free_chan_resources)(struct dma_chan___2 *); + struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(struct dma_chan___2 *, + dma_addr_t, dma_addr_t, + size_t, unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_xor)(struct dma_chan___2 *, dma_addr_t, + dma_addr_t *, unsigned int, + size_t, unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(struct dma_chan___2 *, + dma_addr_t *, + unsigned int, size_t, + enum sum_check_flags *, + unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_pq)(struct dma_chan___2 *, + dma_addr_t *, dma_addr_t *, + unsigned int, + const unsigned char *, + size_t, unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( + struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, + const unsigned char *, size_t, enum sum_check_flags *, unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_memset)(struct dma_chan___2 *, + dma_addr_t, int, size_t, + unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(struct dma_chan___2 *, + struct scatterlist *, + unsigned int, int, + unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(struct dma_chan___2 *, + unsigned long); + struct dma_async_tx_descriptor *(*device_prep_slave_sg)(struct dma_chan___2 *, + struct scatterlist *, + unsigned int, + enum dma_transfer_direction, + unsigned long, void *); + struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(struct dma_chan___2 *, + dma_addr_t, size_t, size_t, + enum dma_transfer_direction, + unsigned long); + struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( + struct dma_chan___2 *, struct dma_interleaved_template *, unsigned long); + struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(struct dma_chan___2 *, + dma_addr_t, u64, + unsigned long); + void (*device_caps)(struct dma_chan___2 *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan___2 *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan___2 *); + int (*device_resume)(struct dma_chan___2 *); + int (*device_terminate_all)(struct dma_chan___2 *); + void (*device_synchronize)(struct dma_chan___2 *); + enum dma_status (*device_tx_status)(struct dma_chan___2 *, dma_cookie_t, + struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan___2 *); 
+ void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +typedef void (*dma_async_tx_callback)(void *); + +struct dmaengine_result; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data; + +struct dma_descriptor_metadata_ops; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan___2 *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; +}; + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +struct dmaengine_unmap_data { + u8 map_cnt; + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void *(*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + void *peripheral_config; + size_t peripheral_size; +}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; +}; + +struct dma_chan_dev { + struct dma_chan___2 *chan; + struct device device; + int dev_id; + bool chan_dma_dev; +}; + +struct dma_chan_percpu { + unsigned 
long memcpy_count; + unsigned long bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, + DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan___2 chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + spinlock_t lock; + struct list_head desc_allocated; + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + struct list_head desc_terminated; + struct virt_dma_desc *cyclic; +}; + +struct dmaengine_desc_callback { + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; +}; + +struct acpi_dma_spec; + +struct acpi_dma { + struct list_head dma_controllers; + struct device *dev; + struct dma_chan___2 *(*acpi_dma_xlate)(struct acpi_dma_spec *, struct acpi_dma *); + void *data; + unsigned short base_request_line; + unsigned short end_request_line; +}; + +struct acpi_dma_spec { + int chan_id; + int slave_id; + struct device *dev; +}; + +struct acpi_csrt_group { + u32 length; + u32 vendor_id; + u32 subvendor_id; + u16 device_id; + u16 subdevice_id; + u16 revision; + u16 reserved; + u32 shared_info_length; +}; + +struct acpi_csrt_shared_info { + u16 major_version; + u16 minor_version; + u32 mmio_base_low; + u32 mmio_base_high; + u32 gsi_interrupt; + u8 interrupt_polarity; + u8 interrupt_mode; + u8 num_channels; + u8 dma_address_width; + u16 base_request_line; + u16 num_handshake_signals; + u32 max_block_size; +}; + +struct acpi_table_csrt { + struct acpi_table_header header; +}; + +struct acpi_dma_parser_data { + struct acpi_dma_spec dma_spec; + size_t index; + size_t n; +}; + +struct acpi_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +enum dw_dmac_flags { + DW_DMA_IS_CYCLIC = 0, + DW_DMA_IS_SOFT_LLP = 1, + DW_DMA_IS_PAUSED = 2, + DW_DMA_IS_INITIALIZED = 3, +}; + +enum dw_dma_fc { + DW_DMA_FC_D_M2M = 0, + DW_DMA_FC_D_M2P = 1, + DW_DMA_FC_D_P2M = 2, + DW_DMA_FC_D_P2P = 3, + DW_DMA_FC_P_P2M = 4, + DW_DMA_FC_SP_P2P = 5, + DW_DMA_FC_P_M2P = 6, + DW_DMA_FC_DP_P2P = 7, +}; + +struct dw_dma_slave { + struct device *dma_dev; + u8 src_id; + u8 dst_id; + u8 m_master; + u8 p_master; + u8 channels; + bool hs_polarity; +}; + +struct dw_dma_chan { + struct dma_chan___2 chan; + void *ch_regs; + u8 mask; + u8 priority; + enum dma_transfer_direction direction; + struct list_head *tx_node_active; + spinlock_t lock; + unsigned long flags; + struct list_head active_list; + struct list_head queue; + unsigned int descs_allocated; + unsigned int block_size; + bool nollp; + u32 max_burst; + struct dw_dma_slave dws; + struct dma_slave_config dma_sconfig; +}; + +struct dw_dma_platform_data; + +struct dw_dma { + struct dma_device dma; + char name[20]; + void *regs; + struct dma_pool *desc_pool; + struct tasklet_struct tasklet; + struct dw_dma_chan *chan; + u8 all_chan_mask; + u8 in_use; + void 
(*initialize_chan)(struct dw_dma_chan *); + void (*suspend_chan)(struct dw_dma_chan *, bool); + void (*resume_chan)(struct dw_dma_chan *, bool); + u32 (*prepare_ctllo)(struct dw_dma_chan *); + void (*encode_maxburst)(struct dw_dma_chan *, u32 *); + u32 (*bytes2block)(struct dw_dma_chan *, size_t, unsigned int, size_t *); + size_t (*block2bytes)(struct dw_dma_chan *, u32, u32); + void (*set_device_name)(struct dw_dma *, int); + void (*disable)(struct dw_dma *); + void (*enable)(struct dw_dma *); + struct dw_dma_platform_data *pdata; +}; + +struct dw_dma_platform_data { + u32 nr_masters; + u32 nr_channels; + u32 chan_allocation_order; + u32 chan_priority; + u32 block_size; + u32 data_width[4]; + u32 multi_block[8]; + u32 max_burst[8]; + u32 protctl; + u32 quirks; +}; + +struct dw_lli { + __le32 sar; + __le32 dar; + __le32 llp; + __le32 ctllo; + __le32 ctlhi; + __le32 sstat; + __le32 dstat; +}; + +struct dw_desc { + struct dw_lli lli; + struct list_head desc_node; + struct list_head tx_list; + struct dma_async_tx_descriptor txd; + size_t len; + size_t total_len; + u32 residue; +}; + +struct dw_dma_chan_regs { + u32 SAR; + u32 __pad_SAR; + u32 DAR; + u32 __pad_DAR; + u32 LLP; + u32 __pad_LLP; + u32 CTL_LO; + u32 CTL_HI; + u32 SSTAT; + u32 __pad_SSTAT; + u32 DSTAT; + u32 __pad_DSTAT; + u32 SSTATAR; + u32 __pad_SSTATAR; + u32 DSTATAR; + u32 __pad_DSTATAR; + u32 CFG_LO; + u32 CFG_HI; + u32 SGR; + u32 __pad_SGR; + u32 DSR; + u32 __pad_DSR; +}; + +struct dw_dma_irq_regs { + u32 XFER; + u32 __pad_XFER; + u32 BLOCK; + u32 __pad_BLOCK; + u32 SRC_TRAN; + u32 __pad_SRC_TRAN; + u32 DST_TRAN; + u32 __pad_DST_TRAN; + u32 ERROR; + u32 __pad_ERROR; +}; + +struct dw_dma_regs { + struct dw_dma_chan_regs CHAN[8]; + struct dw_dma_irq_regs RAW; + struct dw_dma_irq_regs STATUS; + struct dw_dma_irq_regs MASK; + struct dw_dma_irq_regs CLEAR; + u32 STATUS_INT; + u32 __pad_STATUS_INT; + u32 REQ_SRC; + u32 __pad_REQ_SRC; + u32 REQ_DST; + u32 __pad_REQ_DST; + u32 SGL_REQ_SRC; + u32 __pad_SGL_REQ_SRC; + u32 SGL_REQ_DST; + u32 __pad_SGL_REQ_DST; + u32 LAST_SRC; + u32 __pad_LAST_SRC; + u32 LAST_DST; + u32 __pad_LAST_DST; + u32 CFG; + u32 __pad_CFG; + u32 CH_EN; + u32 __pad_CH_EN; + u32 ID; + u32 __pad_ID; + u32 TEST; + u32 __pad_TEST; + u32 CLASS_PRIORITY0; + u32 __pad_CLASS_PRIORITY0; + u32 CLASS_PRIORITY1; + u32 __pad_CLASS_PRIORITY1; + u32 __reserved; + u32 DWC_PARAMS[8]; + u32 MULTI_BLK_TYPE; + u32 MAX_BLK_SIZE; + u32 DW_PARAMS; + u32 COMP_TYPE; + u32 COMP_VERSION; + u32 FIFO_PARTITION0; + u32 __pad_FIFO_PARTITION0; + u32 FIFO_PARTITION1; + u32 __pad_FIFO_PARTITION1; + u32 SAI_ERR; + u32 __pad_SAI_ERR; + u32 GLOBAL_CFG; + u32 __pad_GLOBAL_CFG; +}; + +struct dw_dma_chip { + struct device *dev; + int id; + int irq; + void *regs; + struct clk *clk; + struct dw_dma *dw; + const struct dw_dma_platform_data *pdata; +}; + +struct hsu_dma_sg; + +struct hsu_dma_desc { + struct virt_dma_desc vdesc; + enum dma_transfer_direction direction; + struct hsu_dma_sg *sg; + unsigned int nents; + size_t length; + unsigned int active; + enum dma_status status; +}; + +struct hsu_dma_sg { + dma_addr_t addr; + unsigned int len; +}; + +struct hsu_dma_chan { + struct virt_dma_chan vchan; + void *reg; + enum dma_transfer_direction direction; + struct dma_slave_config config; + struct hsu_dma_desc *desc; +}; + +struct hsu_dma; + +struct hsu_dma_chip { + struct device *dev; + int irq; + void *regs; + unsigned int length; + unsigned int offset; + struct hsu_dma *hsu; +}; + +struct hsu_dma { + struct dma_device dma; + struct hsu_dma_chan *chan; + 
unsigned short nr_channels; +}; + +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *); + int (*probe)(struct virtio_device *); + void (*scan)(struct virtio_device *); + void (*remove)(struct virtio_device *); + void (*config_changed)(struct virtio_device *); + int (*freeze)(struct virtio_device *); + int (*restore)(struct virtio_device *); +}; + +struct vring_desc; + +typedef struct vring_desc vring_desc_t; + +struct vring_avail; + +typedef struct vring_avail vring_avail_t; + +struct vring_used; + +typedef struct vring_used vring_used_t; + +struct vring { + unsigned int num; + vring_desc_t *desc; + vring_avail_t *avail; + vring_used_t *used; +}; + +struct vring_desc_state_split; + +struct vring_desc_extra; + +struct vring_virtqueue_split { + struct vring vring; + u16 avail_flags_shadow; + u16 avail_idx_shadow; + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + u32 vring_align; + bool may_reduce_num; +}; + +struct vring_packed_desc; + +struct vring_packed_desc_event; + +struct vring_desc_state_packed; + +struct vring_virtqueue_packed { + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + bool avail_wrap_counter; + u16 avail_used_flags; + u16 next_avail_idx; + u16 event_flags_shadow; + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; +}; + +struct vring_virtqueue { + struct virtqueue vq; + bool packed_ring; + bool use_dma_api; + bool weak_barriers; + bool broken; + bool indirect; + bool event; + bool premapped; + bool do_unmap; + unsigned int free_head; + unsigned int num_added; + u16 last_used_idx; + bool event_triggered; + union { + struct vring_virtqueue_split split; + struct vring_virtqueue_packed packed; + }; + bool (*notify)(struct virtqueue *); + bool we_own_ring; + struct device *dma_dev; +}; + +typedef __u64 __virtio64; + +typedef __u32 __virtio32; + +typedef __u16 __virtio16; + +struct vring_desc { + __virtio64 addr; + __virtio32 len; + __virtio16 flags; + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[0]; +}; + +struct vring_used_elem { + __virtio32 id; + __virtio32 len; +}; + +typedef struct vring_used_elem vring_used_elem_t; + +struct vring_used { + __virtio16 flags; + __virtio16 idx; + vring_used_elem_t ring[0]; +}; + +struct vring_desc_state_split { + void *data; + struct vring_desc *indir_desc; +}; + +struct vring_desc_extra { + dma_addr_t addr; + u32 len; + u16 flags; + u16 next; +}; + +struct vring_packed_desc { + __le64 addr; + __le32 len; + __le16 id; + __le16 flags; +}; + +struct vring_packed_desc_event { + __le16 off_wrap; + __le16 flags; +}; + +struct vring_desc_state_packed { + void *data; + struct vring_packed_desc *indir_desc; + u16 num; + u16 last; +}; + +struct virtio_pci_common_cfg { + __le32 device_feature_select; + __le32 device_feature; + __le32 guest_feature_select; + __le32 guest_feature; + __le16 msix_config; + __le16 num_queues; + __u8 device_status; + __u8 
config_generation; + __le16 queue_select; + __le16 queue_size; + __le16 queue_msix_vector; + __le16 queue_enable; + __le16 queue_notify_off; + __le32 queue_desc_lo; + __le32 queue_desc_hi; + __le32 queue_avail_lo; + __le32 queue_avail_hi; + __le32 queue_used_lo; + __le32 queue_used_hi; +}; + +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + __le16 queue_notify_data; + __le16 queue_reset; +}; + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + struct virtio_pci_common_cfg *common; + void *device; + void *notify_base; + resource_size_t notify_pa; + u8 *isr; + size_t notify_len; + size_t device_len; + size_t common_len; + int notify_map_cap; + u32 notify_offset_multiplier; + int modern_bars; + struct virtio_device_id id; + int (*device_id_check)(struct pci_dev *); + u64 dma_mask; +}; + +struct virtio_pci_legacy_device { + struct pci_dev *pci_dev; + u8 *isr; + void *ioaddr; + struct virtio_device_id id; +}; + +struct virtio_pci_vq_info; + +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + union { + struct virtio_pci_legacy_device ldev; + struct virtio_pci_modern_device mdev; + }; + bool is_legacy; + u8 *isr; + spinlock_t lock; + struct list_head virtqueues; + struct virtio_pci_vq_info **vqs; + int msix_enabled; + int intx_enabled; + cpumask_var_t *msix_affinity_masks; + char (*msix_names)[256]; + unsigned int msix_vectors; + unsigned int msix_used_vectors; + bool per_vq_vectors; + struct virtqueue *(*setup_vq)(struct virtio_pci_device *, + struct virtio_pci_vq_info *, unsigned int, + void (*)(struct virtqueue *), const char *, bool, u16); + void (*del_vq)(struct virtio_pci_vq_info *); + u16 (*config_vector)(struct virtio_pci_device *, u16); +}; + +struct virtio_pci_vq_info { + struct virtqueue *vq; + struct list_head node; + unsigned int msix_vector; +}; + +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +enum virtio_input_config_select { + VIRTIO_INPUT_CFG_UNSET = 0, + VIRTIO_INPUT_CFG_ID_NAME = 1, + VIRTIO_INPUT_CFG_ID_SERIAL = 2, + VIRTIO_INPUT_CFG_ID_DEVIDS = 3, + VIRTIO_INPUT_CFG_PROP_BITS = 16, + VIRTIO_INPUT_CFG_EV_BITS = 17, + VIRTIO_INPUT_CFG_ABS_INFO = 18, +}; + +struct virtio_input_event { + __le16 type; + __le16 code; + __le32 value; +}; + +struct input_dev; + +struct virtio_input { + struct virtio_device *vdev; + struct input_dev *idev; + char name[64]; + char serial[64]; + char phys[64]; + struct virtqueue *evt; + struct virtqueue *sts; + struct virtio_input_event evts[64]; + spinlock_t lock; + bool ready; +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_keymap_entry; + +struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_absinfo; + +struct input_handle; + +struct input_value; + +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + unsigned long propbit[1]; + unsigned long evbit[1]; + unsigned long keybit[12]; + unsigned long relbit[1]; + unsigned long absbit[1]; + unsigned long mscbit[1]; + unsigned long ledbit[1]; + unsigned long sndbit[1]; + unsigned long ffbit[2]; + unsigned long swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, + unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int 
repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct input_absinfo *absinfo; + unsigned long key[12]; + unsigned long led[1]; + unsigned long snd[1]; + unsigned long sw[1]; + int (*open)(struct input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle __attribute__((btf_type_tag("rcu"))) * grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; + bool inhibited; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_effect; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + unsigned long ffbit[2]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 __attribute__((btf_type_tag("user"))) * custom_data; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + struct list_head d_node; + struct list_head h_node; +}; + +struct input_device_id; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + void (*events)(struct input_handle *, const struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int 
(*connect)(struct input_handler *, struct input_dev *, + const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool legacy_minors; + int minor; + const char *name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[12]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[1]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[2]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +struct serial_struct32 { + compat_int_t type; + compat_int_t line; + compat_uint_t port; + compat_int_t irq; + compat_int_t flags; + compat_int_t xmit_fifo_size; + compat_int_t custom_divisor; + compat_int_t baud_base; + unsigned short close_delay; + char io_type; + char reserved_char; + compat_int_t hub6; + unsigned short closing_wait; + unsigned short closing_wait2; + compat_uint_t iomem_base; + unsigned short iomem_reg_shift; + unsigned int port_high; + compat_int_t reserved; +}; + +enum tty_flow_change { + TTY_FLOW_NO_CHANGE = 0, + TTY_THROTTLE_SAFE = 1, + TTY_UNTHROTTLE_SAFE = 2, +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + unsigned long char_map[4]; + unsigned long overrun_time; + unsigned int num_overrun; + bool no_room; + unsigned char lnext : 1; + unsigned char erasing : 1; + unsigned char raw : 1; + unsigned char real_raw : 1; + unsigned char icanon : 1; + unsigned char push : 1; + u8 read_buf[4096]; + unsigned long read_flags[64]; + u8 echo_buf[4096]; + size_t read_tail; + size_t line_start; + size_t lookahead_count; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; +}; + +struct termios2 { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct termio { + unsigned short c_iflag; + unsigned short c_oflag; + unsigned short c_cflag; + unsigned short c_lflag; + unsigned char c_line; + unsigned char c_cc[8]; +}; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct tty_audit_buf { + struct mutex mutex; + dev_t dev; + bool icanon; + size_t valid; + u8 *data; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + unsigned long key_down[12]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + unsigned long reset_keybit[12]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +struct vt_event { + unsigned int event; + unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct 
vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +struct compat_console_font_op { + compat_uint_t op; + compat_uint_t flags; + compat_uint_t width; + compat_uint_t height; + compat_uint_t charcount; + compat_caddr_t data; +}; + +struct kbd_repeat { + int delay; + int period; +}; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char __attribute__((btf_type_tag("user"))) * data; +}; + +struct unipair; + +struct unimapdesc { + unsigned short entry_ct; + struct unipair __attribute__((btf_type_tag("user"))) * entries; +}; + +struct unipair { + unsigned short unicode; + unsigned short fontpos; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + unsigned short kb_value; +}; + +struct compat_unimapdesc { + unsigned short entry_ct; + compat_caddr_t entries; +}; + +struct vt_stat { + unsigned short v_active; + unsigned short v_signal; + unsigned short v_state; +}; + +struct vt_sizes { + unsigned short v_rows; + unsigned short v_cols; + unsigned short v_scrollsize; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +struct vt_consize { + unsigned short v_rows; + unsigned short v_cols; + unsigned short v_vlin; + unsigned short v_clin; + unsigned short v_vcol; + unsigned short v_ccol; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +struct vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +struct tiocl_selection { + unsigned short xs; + unsigned short ys; + unsigned short xe; + unsigned short ye; + unsigned short sel_mode; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode : 1; + unsigned char ledflagstate : 4; + char : 3; + unsigned char default_ledflagstate : 4; + unsigned char kbdmode : 3; + int : 1; + unsigned char modeflags : 5; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void fn_handler_fn(struct vc_data *); + +struct kbd_led_trigger { + struct led_trigger trigger; + unsigned int mask; +}; + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc kbdiacruc[256]; +}; + +enum translation_map { + LAT1_MAP = 0, + GRAF_MAP = 1, + IBMPC_MAP = 2, + USER_MAP = 3, + FIRST_MAP = 0, + LAST_MAP = 3, +}; + +struct uni_pagedict { + u16 **uni_pgdir[32]; + unsigned long refcount; + unsigned long sum; + unsigned char *inverse_translations[4]; + u16 *inverse_trans_unicode; +}; + 
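+/* Editorial note (not generated): uni_pagedict above holds the console's Unicode-to-font-position tables; con_driver and the ESnormal/EPecma enums that follow belong to the VT console-driver registry and the terminal escape-sequence state machine. */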
+struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, + ESapc = 13, + ESpm = 14, + ESdcs = 15, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +struct vc_draw_region { + unsigned long from; + unsigned long to; + int x; +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +struct hvc_struct; + +struct hv_ops { + int (*get_chars)(uint32_t, char *, int); + int (*put_chars)(uint32_t, const char *, int); + int (*flush)(uint32_t, bool); + int (*notifier_add)(struct hvc_struct *, int); + void (*notifier_del)(struct hvc_struct *, int); + void (*notifier_hangup)(struct hvc_struct *, int); + int (*tiocmget)(struct hvc_struct *); + int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int); + void (*dtr_rts)(struct hvc_struct *, bool); +}; + +struct hvc_struct { + struct tty_port port; + spinlock_t lock; + int index; + int do_wakeup; + char *outbuf; + int outbuf_size; + int n_outbuf; + uint32_t vtermno; + const struct hv_ops *ops; + int irq_requested; + int data; + struct winsize ws; + struct work_struct tty_resize; + struct list_head next; + unsigned long flags; +}; + +enum uart_pm_state { + UART_PM_STATE_ON = 0, + UART_PM_STATE_OFF = 3, + UART_PM_STATE_UNDEFINED = 4, +}; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +typedef u64 upf_t; + +typedef unsigned int upstat_t; + +struct circ_buf { + char *buf; + int head; + int tail; +}; + +struct uart_port; + +struct uart_state { + struct tty_port port; + enum uart_pm_state pm_state; + struct circ_buf xmit; + atomic_t refcount; + wait_queue_head_t remove_wait; + struct uart_port *uart_port; +}; + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +struct serial_rs485 { + __u32 flags; + __u32 delay_rts_before_send; + __u32 delay_rts_after_send; + union { + __u32 padding[5]; + struct { + __u8 addr_recv; + __u8 addr_dest; + __u8 padding0[2]; + __u32 padding1[4]; + }; + }; +}; + +struct gpio_desc; + +struct serial_iso7816 { + __u32 flags; + __u32 tg; + __u32 sc_fi; + __u32 sc_di; + __u32 clk; + __u32 reserved[5]; +}; + +struct uart_ops; + +struct serial_port_device; + +struct uart_port { + spinlock_t lock; + unsigned long iobase; + unsigned char *membase; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); + void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + 
int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); + int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); + unsigned int ctrl_id; + unsigned int port_id; + unsigned int irq; + unsigned long irqflags; + unsigned int uartclk; + unsigned int fifosize; + unsigned char x_char; + unsigned char regshift; + unsigned char iotype; + unsigned char quirks; + unsigned int read_status_mask; + unsigned int ignore_status_mask; + struct uart_state *state; + struct uart_icount icount; + struct console *cons; + upf_t flags; + upstat_t status; + bool hw_stopped; + unsigned int mctrl; + unsigned int frame_time; + unsigned int type; + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; + unsigned int minor; + resource_size_t mapbase; + resource_size_t mapsize; + struct device *dev; + struct serial_port_device *port_dev; + unsigned long sysrq; + u8 sysrq_ch; + unsigned char has_sysrq; + unsigned char sysrq_seq; + unsigned char hub6; + unsigned char suspended; + unsigned char console_reinit; + const char *name; + struct attribute_group *attr_group; + const struct attribute_group **tty_groups; + struct serial_rs485 rs485; + struct serial_rs485 rs485_supported; + struct gpio_desc *rs485_term_gpio; + struct gpio_desc *rs485_rx_during_tx_gpio; + struct serial_iso7816 iso7816; + void *private_data; +}; + +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char); + void (*stop_rx)(struct uart_port *); + void (*start_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + const char *(*type)(struct uart_port *); + void (*release_port)(struct uart_port *); + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, unsigned long); +}; + +struct serial_port_device { + struct device dev; + struct uart_port *port; +}; + +struct serial_ctrl_device { + struct device dev; + struct ida port_ida; +}; + +struct uart_driver { + struct module *owner; + const char *driver_name; + const char *dev_name; + int major; + int minor; + int nr; + struct console *cons; + struct uart_state *state; + struct tty_driver *tty_driver; +}; + +struct uart_match { + struct uart_port *port; + struct uart_driver *driver; +}; + +struct earlycon_device { + struct console *con; + struct uart_port port; + char options[32]; + unsigned int baud; +}; + +struct earlycon_id { + char name[15]; + char name_term; + char compatible[128]; + int (*setup)(struct earlycon_device *, const char *); +}; + +struct mctrl_gpios; + +struct uart_8250_dma; + +struct uart_8250_ops; + +struct uart_8250_em485; + +struct 
uart_8250_port { + struct uart_port port; + struct timer_list timer; + struct list_head list; + u32 capabilities; + u16 bugs; + unsigned int tx_loadsz; + unsigned char acr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char cur_iotype; + unsigned int rpm_tx_active; + unsigned char canary; + unsigned char probe; + struct mctrl_gpios *gpios; + u16 lsr_saved_flags; + u16 lsr_save_mask; + unsigned char msr_saved_flags; + struct uart_8250_dma *dma; + const struct uart_8250_ops *ops; + u32 (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, u32); + struct uart_8250_em485 *em485; + void (*rs485_start_tx)(struct uart_8250_port *); + void (*rs485_stop_tx)(struct uart_8250_port *); + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; +}; + +struct uart_8250_dma { + int (*tx_dma)(struct uart_8250_port *); + int (*rx_dma)(struct uart_8250_port *); + void (*prepare_tx_dma)(struct uart_8250_port *); + void (*prepare_rx_dma)(struct uart_8250_port *); + dma_filter_fn fn; + void *rx_param; + void *tx_param; + struct dma_slave_config rxconf; + struct dma_slave_config txconf; + struct dma_chan___2 *rxchan; + struct dma_chan___2 *txchan; + phys_addr_t rx_dma_addr; + phys_addr_t tx_dma_addr; + dma_addr_t rx_addr; + dma_addr_t tx_addr; + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + void *rx_buf; + size_t rx_size; + size_t tx_size; + unsigned char tx_running; + unsigned char tx_err; + unsigned char rx_running; +}; + +struct uart_8250_ops { + int (*setup_irq)(struct uart_8250_port *); + void (*release_irq)(struct uart_8250_port *); + void (*setup_timer)(struct uart_8250_port *); +}; + +struct uart_8250_em485 { + struct hrtimer start_tx_timer; + struct hrtimer stop_tx_timer; + struct hrtimer *active_timer; + struct uart_8250_port *port; + unsigned int tx_stopped : 1; +}; + +struct old_serial_port { + unsigned int uart; + unsigned int baud_base; + unsigned int port; + unsigned int irq; + upf_t flags; + unsigned char io_type; + unsigned char *iomem_base; + unsigned short iomem_reg_shift; +}; + +enum { + PLAT8250_DEV_LEGACY = -1, + PLAT8250_DEV_PLATFORM = 0, + PLAT8250_DEV_PLATFORM1 = 1, + PLAT8250_DEV_PLATFORM2 = 2, + PLAT8250_DEV_FOURPORT = 3, + PLAT8250_DEV_ACCENT = 4, + PLAT8250_DEV_BOCA = 5, + PLAT8250_DEV_EXAR_ST16C554 = 6, + PLAT8250_DEV_HUB6 = 7, + PLAT8250_DEV_AU1X00 = 8, + PLAT8250_DEV_SM501 = 9, +}; + +struct irq_info { + struct hlist_node node; + int irq; + spinlock_t lock; + struct list_head *head; +}; + +struct plat_serial8250_port { + unsigned long iobase; + void *membase; + resource_size_t mapbase; + resource_size_t mapsize; + unsigned int uartclk; + unsigned int irq; + unsigned long irqflags; + void *private_data; + unsigned char regshift; + unsigned char iotype; + unsigned char hub6; + unsigned char has_sysrq; + unsigned int type; + upf_t flags; + u16 bugs; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + u32 (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, u32); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); +}; + +struct serial8250_config { + const char *name; + unsigned short fifo_size; + unsigned short tx_loadsz; + 
unsigned char fcr; + unsigned char rxtrig_bytes[4]; + unsigned int flags; +}; + +struct dw8250_port_data { + int line; + struct uart_8250_dma dma; + u8 dlf_size; + bool hw_rs485_support; +}; + +struct reset_control; + +struct dw8250_platform_data; + +struct dw8250_data { + struct dw8250_port_data data; + const struct dw8250_platform_data *pdata; + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct notifier_block clk_notifier; + struct work_struct clk_work; + struct reset_control *rst; + unsigned int skip_autocfg : 1; + unsigned int uart_16550_compatible : 1; +}; + +struct dw8250_platform_data { + u8 usr_reg; + u32 cpr_val; + unsigned int quirks; +}; + +struct exar8250; + +struct exar8250_board { + unsigned int num_ports; + unsigned int reg_shift; + int (*setup)(struct exar8250 *, struct pci_dev *, struct uart_8250_port *, int); + void (*exit)(struct pci_dev *); +}; + +struct exar8250 { + unsigned int nr; + struct exar8250_board *board; + void *virt; + int line[0]; +}; + +struct exar8250_platform { + int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); + const struct serial_rs485 *rs485_supported; + int (*register_gpio)(struct pci_dev *, struct uart_8250_port *); + void (*unregister_gpio)(struct uart_8250_port *); +}; + +struct lpss8250; + +struct lpss8250_board { + unsigned long freq; + unsigned int base_baud; + int (*setup)(struct lpss8250 *, struct uart_port *); + void (*exit)(struct lpss8250 *); +}; + +struct lpss8250 { + struct dw8250_port_data data; + struct lpss8250_board *board; + struct dw_dma_chip dma_chip; + struct dw_dma_slave dma_param; + u8 dma_maxburst; +}; + +struct mid8250; + +struct mid8250_board { + unsigned long freq; + unsigned int base_baud; + unsigned int bar; + int (*setup)(struct mid8250 *, struct uart_port *); + void (*exit)(struct mid8250 *); +}; + +struct mid8250 { + int line; + int dma_index; + struct pci_dev *dma_dev; + struct uart_8250_dma dma; + struct mid8250_board *board; + struct hsu_dma_chip dma_chip; +}; + +struct hsu_dma_slave { + struct device *dma_dev; + int chan_id; +}; + +struct serial_private; + +struct pciserial_board; + +struct pci_serial_quirk { + u32 vendor; + u32 device; + u32 subvendor; + u32 subdevice; + int (*probe)(struct pci_dev *); + int (*init)(struct pci_dev *); + int (*setup)(struct serial_private *, const struct pciserial_board *, + struct uart_8250_port *, int); + void (*exit)(struct pci_dev *); +}; + +struct serial_private { + struct pci_dev *dev; + unsigned int nr; + struct pci_serial_quirk *quirk; + const struct pciserial_board *board; + int line[0]; +}; + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct timedia_struct { + int num; + const unsigned short *ids; +}; + +enum { + MOXA_SUPP_RS232 = 1, + MOXA_SUPP_RS422 = 2, + MOXA_SUPP_RS485 = 4, +}; + +enum pci_board_num_t { + pbn_default = 0, + pbn_b0_1_115200 = 1, + pbn_b0_2_115200 = 2, + pbn_b0_4_115200 = 3, + pbn_b0_5_115200 = 4, + pbn_b0_8_115200 = 5, + pbn_b0_1_921600 = 6, + pbn_b0_2_921600 = 7, + pbn_b0_4_921600 = 8, + pbn_b0_2_1130000 = 9, + pbn_b0_4_1152000 = 10, + pbn_b0_4_1250000 = 11, + pbn_b0_2_1843200 = 12, + pbn_b0_4_1843200 = 13, + pbn_b0_1_15625000 = 14, + pbn_b0_bt_1_115200 = 15, + pbn_b0_bt_2_115200 = 16, + pbn_b0_bt_4_115200 = 17, + pbn_b0_bt_8_115200 = 18, + pbn_b0_bt_1_460800 = 19, + pbn_b0_bt_2_460800 = 20, + pbn_b0_bt_4_460800 = 21, + 
pbn_b0_bt_1_921600 = 22, + pbn_b0_bt_2_921600 = 23, + pbn_b0_bt_4_921600 = 24, + pbn_b0_bt_8_921600 = 25, + pbn_b1_1_115200 = 26, + pbn_b1_2_115200 = 27, + pbn_b1_4_115200 = 28, + pbn_b1_8_115200 = 29, + pbn_b1_16_115200 = 30, + pbn_b1_1_921600 = 31, + pbn_b1_2_921600 = 32, + pbn_b1_4_921600 = 33, + pbn_b1_8_921600 = 34, + pbn_b1_2_1250000 = 35, + pbn_b1_bt_1_115200 = 36, + pbn_b1_bt_2_115200 = 37, + pbn_b1_bt_4_115200 = 38, + pbn_b1_bt_2_921600 = 39, + pbn_b1_1_1382400 = 40, + pbn_b1_2_1382400 = 41, + pbn_b1_4_1382400 = 42, + pbn_b1_8_1382400 = 43, + pbn_b2_1_115200 = 44, + pbn_b2_2_115200 = 45, + pbn_b2_4_115200 = 46, + pbn_b2_8_115200 = 47, + pbn_b2_1_460800 = 48, + pbn_b2_4_460800 = 49, + pbn_b2_8_460800 = 50, + pbn_b2_16_460800 = 51, + pbn_b2_1_921600 = 52, + pbn_b2_4_921600 = 53, + pbn_b2_8_921600 = 54, + pbn_b2_8_1152000 = 55, + pbn_b2_bt_1_115200 = 56, + pbn_b2_bt_2_115200 = 57, + pbn_b2_bt_4_115200 = 58, + pbn_b2_bt_2_921600 = 59, + pbn_b2_bt_4_921600 = 60, + pbn_b3_2_115200 = 61, + pbn_b3_4_115200 = 62, + pbn_b3_8_115200 = 63, + pbn_b4_bt_2_921600 = 64, + pbn_b4_bt_4_921600 = 65, + pbn_b4_bt_8_921600 = 66, + pbn_panacom = 67, + pbn_panacom2 = 68, + pbn_panacom4 = 69, + pbn_plx_romulus = 70, + pbn_oxsemi = 71, + pbn_oxsemi_1_15625000 = 72, + pbn_oxsemi_2_15625000 = 73, + pbn_oxsemi_4_15625000 = 74, + pbn_oxsemi_8_15625000 = 75, + pbn_intel_i960 = 76, + pbn_sgi_ioc3 = 77, + pbn_computone_4 = 78, + pbn_computone_6 = 79, + pbn_computone_8 = 80, + pbn_sbsxrsio = 81, + pbn_pasemi_1682M = 82, + pbn_ni8430_2 = 83, + pbn_ni8430_4 = 84, + pbn_ni8430_8 = 85, + pbn_ni8430_16 = 86, + pbn_ADDIDATA_PCIe_1_3906250 = 87, + pbn_ADDIDATA_PCIe_2_3906250 = 88, + pbn_ADDIDATA_PCIe_4_3906250 = 89, + pbn_ADDIDATA_PCIe_8_3906250 = 90, + pbn_ce4100_1_115200 = 91, + pbn_omegapci = 92, + pbn_NETMOS9900_2s_115200 = 93, + pbn_brcm_trumanage = 94, + pbn_fintek_4 = 95, + pbn_fintek_8 = 96, + pbn_fintek_12 = 97, + pbn_fintek_F81504A = 98, + pbn_fintek_F81508A = 99, + pbn_fintek_F81512A = 100, + pbn_wch382_2 = 101, + pbn_wch384_4 = 102, + pbn_wch384_8 = 103, + pbn_sunix_pci_1s = 104, + pbn_sunix_pci_2s = 105, + pbn_sunix_pci_4s = 106, + pbn_sunix_pci_8s = 107, + pbn_sunix_pci_16s = 108, + pbn_titan_1_4000000 = 109, + pbn_titan_2_4000000 = 110, + pbn_titan_4_4000000 = 111, + pbn_titan_8_4000000 = 112, + pbn_moxa_2 = 113, + pbn_moxa_4 = 114, + pbn_moxa_8 = 115, +}; + +struct f815xxa_data { + spinlock_t lock; + int idx; +}; + +struct pericom8250 { + void *virt; + unsigned int nr; + int line[0]; +}; + +struct memdev { + const char *name; + const struct file_operations *fops; + fmode_t fmode; + umode_t mode; +}; + +struct timer_rand_state { + unsigned long last_time; + long last_delta; + long last_delta2; +}; + +enum { + CRNG_EMPTY = 0, + CRNG_EARLY = 1, + CRNG_READY = 2, +}; + +struct batch_u8 { + u8 entropy[96]; + local_lock_t lock; + unsigned long generation; + unsigned int position; +}; + +struct batch_u16 { + u16 entropy[48]; + local_lock_t lock; + unsigned long generation; + unsigned int position; +}; + +struct batch_u32 { + u32 entropy[24]; + local_lock_t lock; + unsigned long generation; + unsigned int position; +}; + +struct batch_u64 { + u64 entropy[12]; + local_lock_t lock; + unsigned long generation; + unsigned int position; +}; + +struct crng { + u8 key[32]; + unsigned long generation; + local_lock_t lock; +}; + +struct fast_pool { + unsigned long pool[4]; + unsigned long last; + unsigned int count; + struct timer_list mix; +}; + +enum { + NUM_TRIAL_SAMPLES = 8192, + MAX_SAMPLES_PER_BIT = 66, +}; + +enum 
{ + MIX_INFLIGHT = 2147483648, +}; + +enum chacha_constants { + CHACHA_CONSTANT_EXPA = 1634760805, + CHACHA_CONSTANT_ND_3 = 857760878, + CHACHA_CONSTANT_2_BY = 2036477234, + CHACHA_CONSTANT_TE_K = 1797285236, +}; + +enum { + POOL_BITS = 256, + POOL_READY_BITS = 256, + POOL_EARLY_BITS = 128, +}; + +enum { + CRNG_RESEED_START_INTERVAL = 1000, + CRNG_RESEED_INTERVAL = 60000, +}; + +struct entropy_timer_state { + unsigned long entropy; + struct timer_list timer; + atomic_t samples; + unsigned int samples_per_bit; +}; + +struct hpets; + +struct hpet; + +struct hpet_timer; + +struct hpet_dev { + struct hpets *hd_hpets; + struct hpet *hd_hpet; + struct hpet_timer *hd_timer; + unsigned long hd_ireqfreq; + unsigned long hd_irqdata; + wait_queue_head_t hd_waitqueue; + struct fasync_struct *hd_async_queue; + unsigned int hd_flags; + unsigned int hd_irq; + unsigned int hd_hdwirq; + char hd_name[7]; +}; + +struct hpets { + struct hpets *hp_next; + struct hpet *hp_hpet; + unsigned long hp_hpet_phys; + struct clocksource *hp_clocksource; + unsigned long long hp_tick_freq; + unsigned long hp_delta; + unsigned int hp_ntimer; + unsigned int hp_which; + struct hpet_dev hp_dev[0]; +}; + +struct hpet_timer { + u64 hpet_config; + union { + u64 _hpet_hc64; + u32 _hpet_hc32; + unsigned long _hpet_compare; + } _u1; + u64 hpet_fsb[2]; +}; + +struct hpet { + u64 hpet_cap; + u64 res0; + u64 hpet_config; + u64 res1; + u64 hpet_isr; + u64 res2[25]; + union { + u64 _hpet_mc64; + u32 _hpet_mc32; + unsigned long _hpet_mc; + } _u0; + u64 res3; + struct hpet_timer hpet_timers[0]; +}; + +struct hpet_info { + unsigned long hi_ireqfreq; + unsigned long hi_flags; + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +struct compat_hpet_info { + compat_ulong_t hi_ireqfreq; + compat_ulong_t hi_flags; + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +struct nvram_ops { + ssize_t (*get_size)(); + unsigned char (*read_byte)(int); + void (*write_byte)(unsigned char, int); + ssize_t (*read)(char *, size_t, loff_t *); + ssize_t (*write)(char *, size_t, loff_t *); + long (*initialize)(); + long (*set_checksum)(); +}; + +struct hwrng { + const char *name; + int (*init)(struct hwrng *); + void (*cleanup)(struct hwrng *); + int (*data_present)(struct hwrng *, int); + int (*data_read)(struct hwrng *, u32 *); + int (*read)(struct hwrng *, void *, size_t, bool); + unsigned long priv; + unsigned short quality; + struct list_head list; + struct kref ref; + struct completion cleanup_done; + struct completion dying; +}; + +enum { + VIA_STRFILT_CNT_SHIFT = 16, + VIA_STRFILT_FAIL = 32768, + VIA_STRFILT_ENABLE = 16384, + VIA_RAWBITS_ENABLE = 8192, + VIA_RNG_ENABLE = 64, + VIA_NOISESRC1 = 256, + VIA_NOISESRC2 = 512, + VIA_XSTORE_CNT_MASK = 15, + VIA_RNG_CHUNK_8 = 0, + VIA_RNG_CHUNK_4 = 1, + VIA_RNG_CHUNK_4_MASK = 4294967295, + VIA_RNG_CHUNK_2 = 2, + VIA_RNG_CHUNK_2_MASK = 65535, + VIA_RNG_CHUNK_1 = 3, + VIA_RNG_CHUNK_1_MASK = 255, +}; + +struct irq_remap_ops { + int capability; + int (*prepare)(); + int (*enable)(); + void (*disable)(); + int (*reenable)(int); + int (*enable_faulting)(); +}; + +struct irq_remap_table; + +struct amd_irte_ops { + void (*prepare)(void *, u32, bool, u8, u32, int); + void (*activate)(struct amd_iommu *, void *, u16, u16); + void (*deactivate)(struct amd_iommu *, void *, u16, u16); + void (*set_affinity)(struct amd_iommu *, void *, u16, u16, u8, u32); + void *(*get)(struct irq_remap_table *, int); + void (*set_allocated)(struct irq_remap_table *, int); + bool (*is_allocated)(struct irq_remap_table *, int); + 
void (*clear_allocated)(struct irq_remap_table *, int); +}; + +struct amd_iommu_pci_seg; + +struct amd_iommu { + struct list_head list; + int index; + raw_spinlock_t lock; + struct pci_dev *dev; + struct pci_dev *root_pdev; + u64 mmio_phys; + u64 mmio_phys_end; + u8 *mmio_base; + u32 cap; + u8 acpi_flags; + u64 features; + u64 features2; + u16 devid; + u16 cap_ptr; + struct amd_iommu_pci_seg *pci_seg; + u64 exclusion_start; + u64 exclusion_length; + u8 *cmd_buf; + u32 cmd_buf_head; + u32 cmd_buf_tail; + u8 *evt_buf; + unsigned char evt_irq_name[16]; + u8 *ppr_log; + unsigned char ppr_irq_name[16]; + u8 *ga_log; + unsigned char ga_irq_name[16]; + u8 *ga_log_tail; + bool int_enabled; + bool need_sync; + bool irtcachedis_enabled; + struct iommu_device iommu; + u32 stored_addr_lo; + u32 stored_addr_hi; + u32 stored_l1[108]; + u32 stored_l2[131]; + u8 max_banks; + u8 max_counters; + struct irq_domain *ir_domain; + struct amd_irte_ops *irte_ops; + u32 flags; + volatile u64 *cmd_sem; + atomic64_t cmd_sem_val; +}; + +struct dev_table_entry; + +struct amd_iommu_pci_seg { + struct list_head list; + struct llist_head dev_data_list; + u16 id; + u16 last_bdf; + u32 dev_table_size; + u32 alias_table_size; + u32 rlookup_table_size; + struct dev_table_entry *dev_table; + struct amd_iommu **rlookup_table; + struct irq_remap_table **irq_lookup_table; + struct dev_table_entry *old_dev_tbl_cpy; + u16 *alias_table; + struct list_head unity_map; +}; + +struct dev_table_entry { + u64 data[4]; +}; + +struct irq_remap_table { + raw_spinlock_t lock; + unsigned int min_index; + u32 *table; +}; + +enum io_pgtable_fmt { + ARM_32_LPAE_S1 = 0, + ARM_32_LPAE_S2 = 1, + ARM_64_LPAE_S1 = 2, + ARM_64_LPAE_S2 = 3, + ARM_V7S = 4, + ARM_MALI_LPAE = 5, + AMD_IOMMU_V1 = 6, + AMD_IOMMU_V2 = 7, + APPLE_DART = 8, + APPLE_DART2 = 9, + IO_PGTABLE_NUM_FMTS = 10, +}; + +enum amd_iommu_intr_mode_type { + AMD_IOMMU_GUEST_IR_LEGACY = 0, + AMD_IOMMU_GUEST_IR_LEGACY_GA = 1, + AMD_IOMMU_GUEST_IR_VAPIC = 2, +}; + +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 2, +}; + +enum iommu_resv_type { + IOMMU_RESV_DIRECT = 0, + IOMMU_RESV_DIRECT_RELAXABLE = 1, + IOMMU_RESV_RESERVED = 2, + IOMMU_RESV_MSI = 3, + IOMMU_RESV_SW_MSI = 4, +}; + +struct irq_2_irte { + u16 devid; + u16 index; +}; + +struct amd_ir_data { + u32 cached_ga_tag; + struct amd_iommu *iommu; + struct irq_2_irte irq_2_irte; + struct msi_msg msi_entry; + void *entry; + struct irq_cfg *cfg; + int ga_vector; + u64 ga_root_ptr; + u32 ga_tag; +}; + +union irte_ga_lo { + u64 val; + struct { + u64 valid : 1; + u64 no_fault : 1; + u64 int_type : 3; + u64 rq_eoi : 1; + u64 dm : 1; + u64 guest_mode : 1; + u64 destination : 24; + u64 ga_tag : 32; + } fields_remap; + struct { + u64 valid : 1; + u64 no_fault : 1; + u64 ga_log_intr : 1; + u64 rsvd1 : 3; + u64 is_run : 1; + u64 guest_mode : 1; + u64 destination : 24; + u64 ga_tag : 32; + } fields_vapic; +}; + +union irte_ga_hi { + u64 val; + struct { + u64 vector : 8; + u64 rsvd_1 : 4; + u64 ga_root_ptr : 40; + u64 rsvd_2 : 4; + u64 destination : 8; + } fields; +}; + +struct irte_ga { + union { + struct { + union irte_ga_lo lo; + union irte_ga_hi hi; + }; + u128 irte; + }; +}; + +struct protection_domain; + +struct iommu_dev_data { + spinlock_t lock; + struct list_head list; + struct llist_node dev_data_list; + struct protection_domain *domain; + struct device *dev; + u16 devid; + u32 flags; + int ats_qdep; + u8 ats_enabled : 1; + u8 pri_enabled : 1; + u8 pasid_enabled : 1; + u8 pri_tlp : 1; + 
u8 ppr : 1; + bool use_vapic; + bool defer_attach; + struct ratelimit_state rs; +}; + +struct iommu_flush_ops; + +struct io_pgtable_cfg { + unsigned long quirks; + unsigned long pgsize_bitmap; + unsigned int ias; + unsigned int oas; + bool coherent_walk; + const struct iommu_flush_ops *tlb; + struct device *iommu_dev; + union { + struct { + u64 ttbr; + struct { + u32 ips : 3; + u32 tg : 2; + u32 sh : 2; + u32 orgn : 2; + u32 irgn : 2; + u32 tsz : 6; + } tcr; + u64 mair; + } arm_lpae_s1_cfg; + struct { + u64 vttbr; + struct { + u32 ps : 3; + u32 tg : 2; + u32 sh : 2; + u32 orgn : 2; + u32 irgn : 2; + u32 sl : 2; + u32 tsz : 6; + } vtcr; + } arm_lpae_s2_cfg; + struct { + u32 ttbr; + u32 tcr; + u32 nmrr; + u32 prrr; + } arm_v7s_cfg; + struct { + u64 transtab; + u64 memattr; + } arm_mali_lpae_cfg; + struct { + u64 ttbr[4]; + u32 n_ttbrs; + } apple_dart_cfg; + }; +}; + +struct io_pgtable_ops { + int (*map_pages)(struct io_pgtable_ops *, unsigned long, phys_addr_t, size_t, + size_t, int, gfp_t, size_t *); + size_t (*unmap_pages)(struct io_pgtable_ops *, unsigned long, size_t, size_t, + struct iommu_iotlb_gather *); + phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *, unsigned long); + int (*read_and_clear_dirty)(struct io_pgtable_ops *, unsigned long, size_t, + unsigned long, struct iommu_dirty_bitmap *); +}; + +struct io_pgtable { + enum io_pgtable_fmt fmt; + void *cookie; + struct io_pgtable_cfg cfg; + struct io_pgtable_ops ops; +}; + +struct amd_io_pgtable { + struct io_pgtable_cfg pgtbl_cfg; + struct io_pgtable iop; + int mode; + u64 *root; + u64 *pgd; +}; + +struct protection_domain { + struct list_head dev_list; + struct iommu_domain domain; + struct amd_io_pgtable iop; + spinlock_t lock; + u16 id; + int glx; + int nid; + u64 *gcr3_tbl; + unsigned long flags; + bool dirty_tracking; + unsigned int dev_cnt; + unsigned int dev_iommu[32]; +}; + +struct iommu_flush_ops { + void (*tlb_flush_all)(void *); + void (*tlb_flush_walk)(unsigned long, size_t, size_t, void *); + void (*tlb_add_page)(struct iommu_iotlb_gather *, unsigned long, size_t, void *); +}; + +struct acpihid_map_entry { + struct list_head list; + u8 uid[256]; + u8 hid[9]; + u32 devid; + u32 root_devid; + bool cmd_line; + struct iommu_group *group; +}; + +struct unity_map_entry { + struct list_head list; + u16 devid_start; + u16 devid_end; + u64 address_start; + u64 address_end; + int prot; +}; + +union irte { + u32 val; + struct { + u32 valid : 1; + u32 no_fault : 1; + u32 int_type : 3; + u32 rq_eoi : 1; + u32 dm : 1; + u32 rsvd_1 : 1; + u32 destination : 8; + u32 vector : 8; + u32 rsvd_2 : 8; + } fields; +}; + +struct devid_map { + struct list_head list; + u8 id; + u32 devid; + bool cmd_line; +}; + +struct iommu_cmd { + u32 data[4]; +}; + +struct iommu_resv_region { + struct list_head list; + phys_addr_t start; + size_t length; + int prot; + enum iommu_resv_type type; + void (*free)(struct device *, struct iommu_resv_region *); +}; + +struct vcpu_data { + u64 pi_desc_addr; + u32 vector; +}; + +struct amd_iommu_pi_data { + u32 ga_tag; + u32 prev_ga_tag; + u64 base; + bool is_guest_mode; + struct vcpu_data *vcpu_data; + void *ir_data; +}; + +struct iommu_group { + struct kobject kobj; + struct kobject *devices_kobj; + struct list_head devices; + struct xarray pasid_array; + struct mutex mutex; + void *iommu_data; + void (*iommu_data_release)(void *); + char *name; + int id; + struct iommu_domain *default_domain; + struct iommu_domain *blocking_domain; + struct iommu_domain *domain; + struct list_head entry; + unsigned int 
owner_cnt; + void *owner; +}; + +enum iommu_init_state { + IOMMU_START_STATE = 0, + IOMMU_IVRS_DETECTED = 1, + IOMMU_ACPI_FINISHED = 2, + IOMMU_ENABLED = 3, + IOMMU_PCI_INIT = 4, + IOMMU_INTERRUPTS_EN = 5, + IOMMU_INITIALIZED = 6, + IOMMU_NOT_FOUND = 7, + IOMMU_INIT_ERROR = 8, + IOMMU_CMDLINE_DISABLED = 9, +}; + +enum irq_remap_cap { + IRQ_POSTING_CAP = 0, +}; + +struct ivhd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 cap_ptr; + u64 mmio_phys; + u16 pci_seg; + u16 info; + u32 efr_attr; + u64 efr_reg; + u64 efr_reg2; +}; + +struct ivhd_entry { + u8 type; + u16 devid; + u8 flags; + union { + struct { + u32 ext; + u32 hidh; + }; + struct { + u32 ext; + u32 hidh; + } ext_hid; + }; + u64 cid; + u8 uidf; + u8 uidl; + u8 uid; +} __attribute__((packed)); + +struct ivmd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 aux; + u16 pci_seg; + u8 resv[6]; + u64 range_start; + u64 range_length; +}; + +union intcapxt { + u64 capxt; + struct { + u64 reserved_0 : 2; + u64 dest_mode_logical : 1; + u64 reserved_1 : 5; + u64 destid_0_23 : 24; + u64 vector : 8; + u64 reserved_2 : 16; + u64 destid_24_31 : 8; + }; +}; + +struct ivrs_quirk_entry { + u8 id; + u32 devid; +}; + +struct io_pgtable_init_fns { + struct io_pgtable *(*alloc)(struct io_pgtable_cfg *, void *); + void (*free)(struct io_pgtable *); +}; + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 6, +}; + +enum bus_notifier_event { + BUS_NOTIFY_ADD_DEVICE = 0, + BUS_NOTIFY_DEL_DEVICE = 1, + BUS_NOTIFY_REMOVED_DEVICE = 2, + BUS_NOTIFY_BIND_DRIVER = 3, + BUS_NOTIFY_BOUND_DRIVER = 4, + BUS_NOTIFY_UNBIND_DRIVER = 5, + BUS_NOTIFY_UNBOUND_DRIVER = 6, + BUS_NOTIFY_DRIVER_NOT_BOUND = 7, +}; + +enum latency_type { + DMAR_LATENCY_INV_IOTLB = 0, + DMAR_LATENCY_INV_DEVTLB = 1, + DMAR_LATENCY_INV_IEC = 2, + DMAR_LATENCY_PRQ = 3, + DMAR_LATENCY_NUM = 4, +}; + +enum { + QI_FREE = 0, + QI_IN_USE = 1, + QI_DONE = 2, + QI_ABORT = 3, +}; + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ROOT_ATS = 2, + ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_NAMESPACE = 4, + ACPI_DMAR_TYPE_SATC = 5, + ACPI_DMAR_TYPE_RESERVED = 6, +}; + +enum faulttype { + DMA_REMAP = 0, + INTR_REMAP = 1, + UNKNOWN = 2, +}; + +struct acpi_dmar_pci_path { + u8 device; + u8 function; +}; + +struct acpi_dmar_header; + +struct dmar_dev_scope; + +struct intel_iommu; + +struct dmar_drhd_unit { + struct list_head list; + struct acpi_dmar_header *hdr; + u64 reg_base_addr; + unsigned long reg_size; + struct dmar_dev_scope *devices; + int devices_cnt; + u16 segment; + u8 ignored : 1; + u8 include_all : 1; + u8 gfx_dedicated : 1; + struct intel_iommu *iommu; +}; + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +struct dmar_dev_scope { + struct device __attribute__((btf_type_tag("rcu"))) * dev; + u8 bus; + u8 devfn; +}; + +struct iopf_queue; + +struct q_inval; + +struct ir_table; + +struct iommu_pmu; + +struct intel_iommu { + void *reg; + u64 reg_phys; + u64 reg_size; + u64 cap; + u64 ecap; + u64 vccap; + u64 ecmdcap[4]; + u32 gcmd; + raw_spinlock_t register_lock; + int seq_id; + int agaw; + int msagaw; + unsigned int irq; + unsigned int pr_irq; + unsigned int perf_irq; + u16 segment; + unsigned char name[13]; + struct iopf_queue *iopf_queue; + unsigned char 
iopfq_name[16]; + struct q_inval *qi; + u32 iommu_state[4]; + struct ir_table *ir_table; + struct irq_domain *ir_domain; + struct iommu_device iommu; + int node; + u32 flags; + struct dmar_drhd_unit *drhd; + void *perf_statistic; + struct iommu_pmu *pmu; +}; + +struct q_inval { + raw_spinlock_t q_lock; + void *desc; + int *desc_status; + int free_head; + int free_tail; + int free_cnt; +}; + +struct irte___2; + +struct ir_table { + struct irte___2 *base; + unsigned long *bitmap; +}; + +struct irte___2 { + union { + struct { + union { + struct { + __u64 present : 1; + __u64 fpd : 1; + __u64 __res0 : 6; + __u64 avail : 4; + __u64 __res1 : 3; + __u64 pst : 1; + __u64 vector : 8; + __u64 __res2 : 40; + }; + struct { + __u64 r_present : 1; + __u64 r_fpd : 1; + __u64 dst_mode : 1; + __u64 redir_hint : 1; + __u64 trigger_mode : 1; + __u64 dlvry_mode : 3; + __u64 r_avail : 4; + __u64 r_res0 : 4; + __u64 r_vector : 8; + __u64 r_res1 : 8; + __u64 dest_id : 32; + }; + struct { + __u64 p_present : 1; + __u64 p_fpd : 1; + __u64 p_res0 : 6; + __u64 p_avail : 4; + __u64 p_res1 : 2; + __u64 p_urgent : 1; + __u64 p_pst : 1; + __u64 p_vector : 8; + __u64 p_res2 : 14; + __u64 pda_l : 26; + }; + __u64 low; + }; + union { + struct { + __u64 sid : 16; + __u64 sq : 2; + __u64 svt : 2; + __u64 __res3 : 44; + }; + struct { + __u64 p_sid : 16; + __u64 p_sq : 2; + __u64 p_svt : 2; + __u64 p_res3 : 12; + __u64 pda_h : 32; + }; + __u64 high; + }; + }; + __u128 irte; + }; +}; + +struct iommu_pmu { + struct intel_iommu *iommu; + u32 num_cntr; + u32 num_eg; + u32 cntr_width; + u32 cntr_stride; + u32 filter; + void *base; + void *cfg_reg; + void *cntr_reg; + void *overflow; + u64 *evcap; + u32 **cntr_evcap; + struct pmu pmu; + unsigned long used_mask[1]; + struct perf_event *event_list[64]; + unsigned char irq_name[16]; + struct hlist_node cpuhp_node; + int cpu; +}; + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 size; + u16 segment; + u64 address; +}; + +struct acpi_table_dmar { + struct acpi_table_header header; + u8 width; + u8 flags; + u8 reserved[10]; +}; + +struct dmar_pci_path { + u8 bus; + u8 device; + u8 function; +}; + +struct dmar_pci_notify_info { + struct pci_dev *dev; + unsigned long event; + int bus; + u16 seg; + u16 level; + struct dmar_pci_path path[0]; +}; + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +} __attribute__((packed)); + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; + u64 end_address; +}; + +struct acpi_dmar_atsr { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +struct acpi_dmar_satc { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *); + +struct dmar_res_callback { + dmar_res_handler_t cb[6]; + void *arg[6]; + bool ignore_unhandled; + bool print_entry; +}; + +struct qi_desc { + u64 qw0; + u64 qw1; + u64 qw2; + u64 qw3; +}; + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +struct acpi_dmar_andd { + struct acpi_dmar_header header; + u8 reserved[3]; + u8 device_number; + union { + char __pad; + struct { + struct { + } __Empty_device_name; + char device_name[0]; + }; + }; +} __attribute__((packed)); + +typedef void (*btf_trace_qi_submit)(void *, struct intel_iommu *, u64, u64, u64, u64); + +typedef void 
(*btf_trace_prq_report)(void *, struct intel_iommu *, struct device *, u64, + u64, u64, u64, unsigned long); + +struct trace_event_raw_qi_submit { + struct trace_entry ent; + u64 qw0; + u64 qw1; + u64 qw2; + u64 qw3; + u32 __data_loc_iommu; + char __data[0]; +}; + +struct trace_event_raw_prq_report { + struct trace_entry ent; + u64 dw0; + u64 dw1; + u64 dw2; + u64 dw3; + unsigned long seq; + u32 __data_loc_iommu; + u32 __data_loc_dev; + u32 __data_loc_buff; + char __data[0]; +}; + +struct trace_event_data_offsets_qi_submit { + u32 iommu; +}; + +struct trace_event_data_offsets_prq_report { + u32 iommu; + u32 dev; + u32 buff; +}; + +enum cap_audit_type { + CAP_AUDIT_STATIC_DMAR = 0, + CAP_AUDIT_STATIC_IRQR = 1, + CAP_AUDIT_HOTPLUG_DMAR = 2, + CAP_AUDIT_HOTPLUG_IRQR = 3, +}; + +struct ioapic_scope { + struct intel_iommu *iommu; + unsigned int id; + unsigned int bus; + unsigned int devfn; +}; + +struct hpet_scope { + struct intel_iommu *iommu; + u8 id; + unsigned int bus; + unsigned int devfn; +}; + +enum irq_mode { + IRQ_REMAPPING = 0, + IRQ_POSTING = 1, +}; + +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; + enum irq_mode mode; +}; + +struct set_msi_sid_data { + struct pci_dev *pdev; + u16 alias; + int count; + int busmatch_count; +}; + +struct intel_ir_data { + struct irq_2_iommu irq_2_iommu; + long : 64; + struct irte___2 irte_entry; + union { + struct msi_msg msi_entry; + }; +}; + +struct iova_bitmap_map { + unsigned long iova; + unsigned long pgshift; + unsigned long pgoff; + unsigned long npages; + struct page **pages; +}; + +struct iova_bitmap { + struct iova_bitmap_map mapped; + u64 __attribute__((btf_type_tag("user"))) * bitmap; + unsigned long mapped_base_index; + unsigned long mapped_total_index; + unsigned long iova; + size_t length; +}; + +typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *, unsigned long, size_t, void *); + +struct iommu_group_attribute { + struct attribute attr; + ssize_t (*show)(struct iommu_group *, char *); + ssize_t (*store)(struct iommu_group *, const char *, size_t); +}; + +enum iommu_fault_type { + IOMMU_FAULT_DMA_UNRECOV = 1, + IOMMU_FAULT_PAGE_REQ = 2, +}; + +enum fsl_mc_pool_type { + FSL_MC_POOL_DPMCP = 0, + FSL_MC_POOL_DPBP = 1, + FSL_MC_POOL_DPCON = 2, + FSL_MC_POOL_IRQ = 3, + FSL_MC_NUM_POOL_TYPES = 4, +}; + +enum { + IOMMU_SET_DOMAIN_MUST_SUCCEED = 1, +}; + +struct group_device { + struct list_head list; + struct device *dev; + char *name; +}; + +struct fsl_mc_obj_desc { + char type[16]; + int id; + u16 vendor; + u16 ver_major; + u16 ver_minor; + u8 irq_count; + u8 region_count; + u32 state; + char label[16]; + u16 flags; +}; + +struct fsl_mc_io; + +struct fsl_mc_device_irq; + +struct fsl_mc_resource; + +struct fsl_mc_device { + struct device dev; + u64 dma_mask; + u16 flags; + u32 icid; + u16 mc_handle; + struct fsl_mc_io *mc_io; + struct fsl_mc_obj_desc obj_desc; + struct resource *regions; + struct fsl_mc_device_irq **irqs; + struct fsl_mc_resource *resource; + struct device_link *consumer_link; + const char *driver_override; +}; + +struct fsl_mc_io { + struct device *dev; + u16 flags; + u32 portal_size; + phys_addr_t portal_phys_addr; + void *portal_virt_addr; + struct fsl_mc_device *dpmcp_dev; + union { + struct mutex mutex; + raw_spinlock_t spinlock; + }; +}; + +struct fsl_mc_resource_pool; + +struct fsl_mc_resource { + enum fsl_mc_pool_type type; + s32 id; + void *data; + struct fsl_mc_resource_pool *parent_pool; + struct list_head node; +}; + +struct fsl_mc_device_irq { + unsigned int 
virq; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +struct group_for_pci_data { + struct pci_dev *pdev; + struct iommu_group *group; +}; + +typedef void (*btf_trace_add_device_to_group)(void *, int, struct device *); + +typedef void (*btf_trace_remove_device_from_group)(void *, int, struct device *); + +typedef void (*btf_trace_attach_device_to_domain)(void *, struct device *); + +typedef void (*btf_trace_map)(void *, unsigned long, phys_addr_t, size_t); + +typedef void (*btf_trace_unmap)(void *, unsigned long, size_t, size_t); + +typedef void (*btf_trace_io_page_fault)(void *, struct device *, unsigned long, int); + +struct trace_event_raw_iommu_group_event { + struct trace_entry ent; + int gid; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_iommu_device_event { + struct trace_entry ent; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_map { + struct trace_entry ent; + u64 iova; + u64 paddr; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_unmap { + struct trace_entry ent; + u64 iova; + size_t size; + size_t unmapped_size; + char __data[0]; +}; + +struct trace_event_raw_iommu_error { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u64 iova; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_iommu_group_event { + u32 device; +}; + +struct trace_event_data_offsets_iommu_device_event { + u32 device; +}; + +struct trace_event_data_offsets_map {}; + +struct trace_event_data_offsets_unmap {}; + +struct trace_event_data_offsets_iommu_error { + u32 device; + u32 driver; +}; + +struct iova { + struct rb_node node; + unsigned long pfn_hi; + unsigned long pfn_lo; +}; + +struct iova_rcache; + +struct iova_domain { + spinlock_t iova_rbtree_lock; + struct rb_root rbroot; + struct rb_node *cached_node; + struct rb_node *cached32_node; + unsigned long granule; + unsigned long start_pfn; + unsigned long dma_32bit_pfn; + unsigned long max32_alloc_size; + struct iova anchor; + struct iova_rcache *rcaches; + struct hlist_node cpuhp_dead; +}; + +enum iommu_dma_cookie_type { + IOMMU_DMA_IOVA_COOKIE = 0, + IOMMU_DMA_MSI_COOKIE = 1, +}; + +enum iommu_dma_queue_type { + IOMMU_DMA_OPTS_PER_CPU_QUEUE = 0, + IOMMU_DMA_OPTS_SINGLE_QUEUE = 1, +}; + +struct iommu_dma_options { + enum iommu_dma_queue_type qt; + size_t fq_size; + unsigned int fq_timeout; +}; + +struct iova_fq; + +struct iommu_dma_cookie { + enum iommu_dma_cookie_type type; + union { + struct { + struct iova_domain iovad; + union { + struct iova_fq *single_fq; + struct iova_fq __attribute__((btf_type_tag("percpu"))) * + percpu_fq; + }; + atomic64_t fq_flush_start_cnt; + atomic64_t fq_flush_finish_cnt; + struct timer_list fq_timer; + atomic_t fq_timer_on; + }; + dma_addr_t msi_iova; + }; + struct list_head msi_page_list; + struct iommu_domain *fq_domain; + struct iommu_dma_options options; + struct mutex mutex; +}; + +struct iova_fq_entry { + unsigned long iova_pfn; + unsigned long pages; + struct list_head freelist; + u64 counter; +}; + +struct iova_fq { + spinlock_t lock; + unsigned int head; + unsigned int tail; + unsigned int mod_mask; + struct iova_fq_entry entries[0]; +}; + +struct iommu_dma_msi_page { + struct list_head list; + dma_addr_t iova; + phys_addr_t phys; +}; + +struct iova_magazine; + +struct iova_cpu_rcache { + spinlock_t lock; + struct iova_magazine *loaded; + struct iova_magazine *prev; +}; + +struct iova_magazine { + union { + unsigned long size; + struct iova_magazine *next; + 
}; + unsigned long pfns[127]; +}; + +struct iova_rcache { + spinlock_t lock; + unsigned int depot_size; + struct iova_magazine *depot; + struct iova_cpu_rcache __attribute__((btf_type_tag("percpu"))) * cpu_rcaches; + struct iova_domain *iovad; + struct delayed_work work; +}; + +struct cb_id { + __u32 idx; + __u32 val; +}; + +struct cn_callback_id { + unsigned char name[32]; + struct cb_id id; +}; + +struct cn_queue_dev; + +struct cn_msg; + +struct cn_callback_entry { + struct list_head callback_entry; + refcount_t refcnt; + struct cn_queue_dev *pdev; + struct cn_callback_id id; + void (*callback)(struct cn_msg *, struct netlink_skb_parms *); + u32 seq; + u32 group; +}; + +struct cn_queue_dev { + atomic_t refcnt; + unsigned char name[32]; + struct list_head queue_list; + spinlock_t queue_lock; + struct sock *nls; +}; + +struct cn_msg { + struct cb_id id; + __u32 seq; + __u32 ack; + __u16 len; + __u16 flags; + __u8 data[0]; +}; + +struct cn_dev { + struct cb_id id; + u32 seq; + u32 groups; + struct sock *nls; + struct cn_queue_dev *cbdev; +}; + +struct local_event { + local_lock_t lock; + __u32 count; +}; + +enum proc_cn_mcast_op { + PROC_CN_MCAST_LISTEN = 1, + PROC_CN_MCAST_IGNORE = 2, +}; + +struct fork_proc_event { + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; + __kernel_pid_t child_pid; + __kernel_pid_t child_tgid; +}; + +struct exec_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; +}; + +struct id_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + union { + __u32 ruid; + __u32 rgid; + } r; + union { + __u32 euid; + __u32 egid; + } e; +}; + +struct sid_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; +}; + +struct ptrace_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t tracer_pid; + __kernel_pid_t tracer_tgid; +}; + +struct comm_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + char comm[16]; +}; + +struct coredump_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; +}; + +struct exit_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __u32 exit_code; + __u32 exit_signal; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; +}; + +struct proc_event { + enum proc_cn_event what; + __u32 cpu; + __u64 timestamp_ns; + union { + struct { + __u32 err; + } ack; + struct fork_proc_event fork; + struct exec_proc_event exec; + struct id_proc_event id; + struct sid_proc_event sid; + struct ptrace_proc_event ptrace; + struct comm_proc_event comm; + struct coredump_proc_event coredump; + struct exit_proc_event exit; + } event_data; +}; + +struct proc_input { + enum proc_cn_mcast_op mcast_op; + enum proc_cn_event event_type; +}; + +struct aggregate_device; + +struct component_ops; + +struct component { + struct list_head node; + struct aggregate_device *adev; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_master_ops; + +struct component_match; + +struct aggregate_device { + struct list_head node; + bool bound; + const struct component_master_ops *ops; + struct device *parent; + struct component_match *match; +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component_match_array; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct 
component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; + u8 flags; +}; + +struct class_dir { + struct kobject kobj; + const struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; +}; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe : 1; + const struct bus_type *bus; + struct device *dev_root; + struct kset glue_dirs; + const struct class *class; + struct lock_class_key lock_key; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(const struct class *, const struct class_attribute *, char *); + ssize_t (*store)(const struct class *, const struct class_attribute *, + const char *, size_t); +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +struct irq_affinity_devres { + unsigned int count; + unsigned int irq[0]; +}; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask *const map; +}; + +struct probe; + +struct kobj_map { + struct probe *probes[255]; + struct mutex *lock; +}; + +struct probe { + struct probe *next; + dev_t dev; + unsigned long range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct devres_node { + struct list_head entry; + dr_release_t release; + const char *name; + size_t size; +}; + +struct devres { + struct devres_node node; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + unsigned long addr; + unsigned int order; +}; + +struct attribute_container; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + struct device classdev; +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + unsigned long flags; 
+}; + +struct transport_container; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_REVRMII = 8, + PHY_INTERFACE_MODE_RGMII = 9, + PHY_INTERFACE_MODE_RGMII_ID = 10, + PHY_INTERFACE_MODE_RGMII_RXID = 11, + PHY_INTERFACE_MODE_RGMII_TXID = 12, + PHY_INTERFACE_MODE_RTBI = 13, + PHY_INTERFACE_MODE_SMII = 14, + PHY_INTERFACE_MODE_XGMII = 15, + PHY_INTERFACE_MODE_XLGMII = 16, + PHY_INTERFACE_MODE_MOCA = 17, + PHY_INTERFACE_MODE_PSGMII = 18, + PHY_INTERFACE_MODE_QSGMII = 19, + PHY_INTERFACE_MODE_TRGMII = 20, + PHY_INTERFACE_MODE_100BASEX = 21, + PHY_INTERFACE_MODE_1000BASEX = 22, + PHY_INTERFACE_MODE_2500BASEX = 23, + PHY_INTERFACE_MODE_5GBASER = 24, + PHY_INTERFACE_MODE_RXAUI = 25, + PHY_INTERFACE_MODE_XAUI = 26, + PHY_INTERFACE_MODE_10GBASER = 27, + PHY_INTERFACE_MODE_25GBASER = 28, + PHY_INTERFACE_MODE_USXGMII = 29, + PHY_INTERFACE_MODE_10GKR = 30, + PHY_INTERFACE_MODE_QUSGMII = 31, + PHY_INTERFACE_MODE_1000BASEKX = 32, + PHY_INTERFACE_MODE_MAX = 33, +} phy_interface_t; + +typedef void *(*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); + +struct swnode { + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + int id; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated : 1; + unsigned int managed : 1; +}; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[8]; +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +typedef int (*pm_callback_t)(struct device *); + +struct suspend_stats { + int success; + int fail; + int failed_freeze; + int failed_prepare; + int failed_suspend; + int failed_suspend_late; + int failed_suspend_noirq; + int failed_resume; + int failed_resume_early; + int failed_resume_noirq; + int last_failed_dev; + char failed_devs[80]; + int last_failed_errno; + int errno[2]; + int last_failed_step; + u64 last_hw_sleep; + u64 total_hw_sleep; + u64 max_hw_sleep; + enum suspend_stat_step failed_steps[2]; +}; + +struct generic_pm_domain; + +struct genpd_lock_ops { + void (*lock)(struct generic_pm_domain *); + void (*lock_nested)(struct generic_pm_domain *, int); + int (*lock_interruptible)(struct generic_pm_domain *); + void (*unlock)(struct generic_pm_domain *); +}; + +enum gpd_status { + GENPD_STATE_ON = 0, + GENPD_STATE_OFF = 1, +}; + +struct opp_table; + +struct dev_pm_opp; + +struct gpd_dev_ops { + int (*start)(struct device *); + int (*stop)(struct device *); +}; + +struct dev_power_governor; + +struct genpd_governor_data; + +struct genpd_power_state; + +struct generic_pm_domain { + struct device dev; + struct dev_pm_domain domain; 
+ struct list_head gpd_list_node; + struct list_head parent_links; + struct list_head child_links; + struct list_head dev_list; + struct dev_power_governor *gov; + struct genpd_governor_data *gd; + struct work_struct power_off_work; + struct fwnode_handle *provider; + bool has_provider; + const char *name; + atomic_t sd_count; + enum gpd_status status; + unsigned int device_count; + unsigned int suspended_count; + unsigned int prepared_count; + unsigned int performance_state; + cpumask_var_t cpus; + bool synced_poweroff; + int (*power_off)(struct generic_pm_domain *); + int (*power_on)(struct generic_pm_domain *); + struct raw_notifier_head power_notifiers; + struct opp_table *opp_table; + unsigned int (*opp_to_performance_state)(struct generic_pm_domain *, + struct dev_pm_opp *); + int (*set_performance_state)(struct generic_pm_domain *, unsigned int); + struct gpd_dev_ops dev_ops; + int (*attach_dev)(struct generic_pm_domain *, struct device *); + void (*detach_dev)(struct generic_pm_domain *, struct device *); + unsigned int flags; + struct genpd_power_state *states; + void (*free_states)(struct genpd_power_state *, unsigned int); + unsigned int state_count; + unsigned int state_idx; + u64 on_time; + u64 accounting_time; + const struct genpd_lock_ops *lock_ops; + union { + struct mutex mlock; + struct { + spinlock_t slock; + unsigned long lock_flags; + }; + }; +}; + +struct dev_power_governor { + bool (*power_down_ok)(struct dev_pm_domain *); + bool (*suspend_ok)(struct device *); +}; + +struct genpd_governor_data { + s64 max_off_time_ns; + bool max_off_time_changed; + ktime_t next_wakeup; + ktime_t next_hrtimer; + bool cached_power_down_ok; + bool cached_power_down_state_idx; +}; + +struct genpd_power_state { + s64 power_off_latency_ns; + s64 power_on_latency_ns; + s64 residency_ns; + u64 usage; + u64 rejected; + struct fwnode_handle *fwnode; + u64 idle_time; + void *data; +}; + +enum genpd_notication { + GENPD_NOTIFY_PRE_OFF = 0, + GENPD_NOTIFY_OFF = 1, + GENPD_NOTIFY_PRE_ON = 2, + GENPD_NOTIFY_ON = 3, +}; + +struct gpd_link { + struct generic_pm_domain *parent; + struct list_head parent_node; + struct generic_pm_domain *child; + struct list_head child_node; + unsigned int performance_state; + unsigned int prev_performance_state; +}; + +struct gpd_timing_data; + +struct generic_pm_domain_data { + struct pm_domain_data base; + struct gpd_timing_data *td; + struct notifier_block nb; + struct notifier_block *power_nb; + int cpu; + unsigned int performance_state; + unsigned int default_pstate; + unsigned int rpm_pstate; + void *data; +}; + +struct gpd_timing_data { + s64 suspend_latency_ns; + s64 resume_latency_ns; + s64 effective_constraint_ns; + ktime_t next_wakeup; + bool constraint_changed; + bool cached_suspend_ok; +}; + +struct firmware_fallback_config { + unsigned int force_sysfs_fallback; + unsigned int ignore_sysfs_fallback; + int old_timeout; + int loading_timeout; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; + spinlock_t name_lock; + struct list_head fw_names; + struct delayed_work work; + struct notifier_block pm_notify; +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +struct fw_state { + struct completion completion; + enum fw_status 
status; +}; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + bool is_paged_buf; + struct page **pages; + int nr_pages; + int page_array_size; + bool need_uevent; + struct list_head pending_list; + const char *fw_name; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +struct fw_cache_entry { + struct list_head list; + const char *name; +}; + +struct fw_name_devm { + unsigned long magic; + const char *name; +}; + +struct fw_sysfs { + bool nowait; + struct device dev; + struct fw_priv *fw_priv; + struct firmware *fw; + void *fw_upload_priv; +}; + +struct builtin_fw { + char *name; + void *data; + unsigned long size; +}; + +struct node_attr { + struct device_attribute attr; + enum node_states state; +}; + +struct node_cache_info { + struct device dev; + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +struct node_access_nodes { + struct device dev; + struct list_head list_node; + unsigned int access; + struct node_hmem_attrs hmem_attrs; +}; + +struct for_each_memory_block_cb_data { + walk_memory_blocks_func_t func; + void *arg; +}; + +typedef void (*btf_trace_regmap_reg_write)(void *, struct regmap *, unsigned int, unsigned int); + +typedef void (*regmap_lock)(void *); + +typedef void (*regmap_unlock)(void *); + +struct regmap_format { + size_t buf_size; + size_t reg_bytes; + size_t pad_bytes; + size_t val_bytes; + s8 reg_shift; + void (*format_write)(struct regmap *, unsigned int, unsigned int); + void (*format_reg)(void *, unsigned int, unsigned int); + void (*format_val)(void *, unsigned int, unsigned int); + unsigned int (*parse_val)(const void *); + void (*parse_inplace)(void *); +}; + +enum regcache_type { + REGCACHE_NONE = 0, + REGCACHE_RBTREE = 1, + REGCACHE_FLAT = 2, + REGCACHE_MAPLE = 3, +}; + +struct hwspinlock; + +struct regmap_bus; + +struct regmap_access_table; + +struct regcache_ops; + +struct reg_default; + +struct reg_sequence; + +struct regmap { + union { + struct mutex mutex; + struct { + spinlock_t spinlock; + unsigned long spinlock_flags; + }; + struct { + raw_spinlock_t raw_spinlock; + unsigned long raw_spinlock_flags; + }; + }; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + gfp_t alloc_flags; + unsigned int reg_base; + struct device *dev; + void *work_buf; + struct regmap_format format; + const struct regmap_bus *bus; + void *bus_context; + const char *name; + bool async; + spinlock_t async_lock; + wait_queue_head_t async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + bool debugfs_disable; + struct dentry *debugfs; + const char *debugfs_name; + unsigned int debugfs_reg_len; + unsigned int debugfs_val_len; + unsigned int debugfs_tot_len; + struct list_head debugfs_off_cache; + struct mutex cache_lock; + unsigned int max_register; + bool (*writeable_reg)(struct device *, unsigned int); + bool (*readable_reg)(struct device *, unsigned int); + bool (*volatile_reg)(struct device *, unsigned int); + bool (*precious_reg)(struct device *, unsigned int); + bool (*writeable_noinc_reg)(struct device *, unsigned int); + bool (*readable_noinc_reg)(struct device *, unsigned int); + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct 
regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + int (*reg_read)(void *, unsigned int, unsigned int *); + int (*reg_write)(void *, unsigned int, unsigned int); + int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + int (*read)(void *, const void *, size_t, void *, size_t); + int (*write)(void *, const void *, size_t); + bool defer_caching; + unsigned long read_flag_mask; + unsigned long write_flag_mask; + int reg_shift; + int reg_stride; + int reg_stride_order; + bool force_write_field; + const struct regcache_ops *cache_ops; + enum regcache_type cache_type; + unsigned int cache_size_raw; + unsigned int cache_word_size; + unsigned int num_reg_defaults; + unsigned int num_reg_defaults_raw; + bool cache_only; + bool cache_bypass; + bool cache_free; + struct reg_default *reg_defaults; + const void *reg_defaults_raw; + void *cache; + bool cache_dirty; + bool no_sync_defaults; + struct reg_sequence *patch; + int patch_regs; + bool use_single_read; + bool use_single_write; + bool can_multi_write; + size_t max_raw_read; + size_t max_raw_write; + struct rb_root range_tree; + void *selector_work_buf; + struct hwspinlock *hwlock; + bool can_sleep; +}; + +typedef int (*regmap_hw_write)(void *, const void *, size_t); + +typedef int (*regmap_hw_gather_write)(void *, const void *, size_t, const void *, size_t); + +struct regmap_async; + +typedef int (*regmap_hw_async_write)(void *, const void *, size_t, const void *, size_t, + struct regmap_async *); + +typedef int (*regmap_hw_reg_write)(void *, unsigned int, unsigned int); + +typedef int (*regmap_hw_reg_noinc_write)(void *, unsigned int, const void *, size_t); + +typedef int (*regmap_hw_reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + +typedef int (*regmap_hw_read)(void *, const void *, size_t, void *, size_t); + +typedef int (*regmap_hw_reg_read)(void *, unsigned int, unsigned int *); + +typedef int (*regmap_hw_reg_noinc_read)(void *, unsigned int, void *, size_t); + +typedef void (*regmap_hw_free_context)(void *); + +typedef struct regmap_async *(*regmap_hw_async_alloc)(); + +enum regmap_endian { + REGMAP_ENDIAN_DEFAULT = 0, + REGMAP_ENDIAN_BIG = 1, + REGMAP_ENDIAN_LITTLE = 2, + REGMAP_ENDIAN_NATIVE = 3, +}; + +struct regmap_bus { + bool fast_io; + bool free_on_exit; + regmap_hw_write write; + regmap_hw_gather_write gather_write; + regmap_hw_async_write async_write; + regmap_hw_reg_write reg_write; + regmap_hw_reg_noinc_write reg_noinc_write; + regmap_hw_reg_update_bits reg_update_bits; + regmap_hw_read read; + regmap_hw_reg_read reg_read; + regmap_hw_reg_noinc_read reg_noinc_read; + regmap_hw_free_context free_context; + regmap_hw_async_alloc async_alloc; + u8 read_flag_mask; + enum regmap_endian reg_format_endian_default; + enum regmap_endian val_format_endian_default; + size_t max_raw_read; + size_t max_raw_write; +}; + +struct regmap_async { + struct list_head list; + struct regmap *map; + void *work_buf; +}; + +struct regmap_range; + +struct regmap_access_table { + const struct regmap_range *yes_ranges; + unsigned int n_yes_ranges; + const struct regmap_range *no_ranges; + unsigned int n_no_ranges; +}; + +struct regmap_range { + unsigned int range_min; + unsigned int range_max; +}; + +struct regcache_ops { + const char *name; + enum regcache_type type; + int (*init)(struct regmap *); + int (*exit)(struct regmap *); + void (*debugfs_init)(struct regmap *); + 
int (*read)(struct regmap *, unsigned int, unsigned int *); + int (*write)(struct regmap *, unsigned int, unsigned int); + int (*sync)(struct regmap *, unsigned int, unsigned int); + int (*drop)(struct regmap *, unsigned int, unsigned int); +}; + +struct reg_default { + unsigned int reg; + unsigned int def; +}; + +struct reg_sequence { + unsigned int reg; + unsigned int def; + unsigned int delay_us; +}; + +typedef void (*btf_trace_regmap_reg_read)(void *, struct regmap *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_reg_read_cache)(void *, struct regmap *, unsigned int, + unsigned int); + +typedef void (*btf_trace_regmap_bulk_write)(void *, struct regmap *, unsigned int, + const void *, int); + +typedef void (*btf_trace_regmap_bulk_read)(void *, struct regmap *, unsigned int, + const void *, int); + +typedef void (*btf_trace_regmap_hw_read_start)(void *, struct regmap *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_read_done)(void *, struct regmap *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_write_start)(void *, struct regmap *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_write_done)(void *, struct regmap *, unsigned int, int); + +typedef void (*btf_trace_regcache_sync)(void *, struct regmap *, const char *, const char *); + +typedef void (*btf_trace_regmap_cache_only)(void *, struct regmap *, bool); + +typedef void (*btf_trace_regmap_cache_bypass)(void *, struct regmap *, bool); + +typedef void (*btf_trace_regmap_async_write_start)(void *, struct regmap *, unsigned int, int); + +typedef void (*btf_trace_regmap_async_io_complete)(void *, struct regmap *); + +typedef void (*btf_trace_regmap_async_complete_start)(void *, struct regmap *); + +typedef void (*btf_trace_regmap_async_complete_done)(void *, struct regmap *); + +typedef void (*btf_trace_regcache_drop_region)(void *, struct regmap *, unsigned int, + unsigned int); + +struct trace_event_raw_regmap_reg { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_regmap_bulk { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + u32 __data_loc_buf; + int val_len; + char __data[0]; +}; + +struct trace_event_raw_regmap_block { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + int count; + char __data[0]; +}; + +struct trace_event_raw_regcache_sync { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_status; + u32 __data_loc_type; + char __data[0]; +}; + +struct trace_event_raw_regmap_bool { + struct trace_entry ent; + u32 __data_loc_name; + int flag; + char __data[0]; +}; + +struct trace_event_raw_regmap_async { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_regcache_drop_region { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int from; + unsigned int to; + char __data[0]; +}; + +struct regmap_range_node { + struct rb_node node; + const char *name; + struct regmap *map; + unsigned int range_min; + unsigned int range_max; + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + unsigned int window_start; + unsigned int window_len; +}; + +struct trace_event_data_offsets_regmap_reg { + u32 name; +}; + +struct trace_event_data_offsets_regmap_bulk { + u32 name; + u32 buf; +}; + +struct trace_event_data_offsets_regmap_block { + u32 name; +}; + +struct trace_event_data_offsets_regmap_bool { + u32 name; +}; + +struct trace_event_data_offsets_regmap_async { + u32 name; 
+}; + +struct trace_event_data_offsets_regcache_drop_region { + u32 name; +}; + +struct regmap_range_cfg; + +struct regmap_config { + const char *name; + int reg_bits; + int reg_stride; + int reg_shift; + unsigned int reg_base; + int pad_bits; + int val_bits; + bool (*writeable_reg)(struct device *, unsigned int); + bool (*readable_reg)(struct device *, unsigned int); + bool (*volatile_reg)(struct device *, unsigned int); + bool (*precious_reg)(struct device *, unsigned int); + bool (*writeable_noinc_reg)(struct device *, unsigned int); + bool (*readable_noinc_reg)(struct device *, unsigned int); + bool disable_locking; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + int (*reg_read)(void *, unsigned int, unsigned int *); + int (*reg_write)(void *, unsigned int, unsigned int); + int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + int (*read)(void *, const void *, size_t, void *, size_t); + int (*write)(void *, const void *, size_t); + size_t max_raw_read; + size_t max_raw_write; + bool fast_io; + bool io_port; + unsigned int max_register; + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + const struct reg_default *reg_defaults; + unsigned int num_reg_defaults; + enum regcache_type cache_type; + const void *reg_defaults_raw; + unsigned int num_reg_defaults_raw; + unsigned long read_flag_mask; + unsigned long write_flag_mask; + bool zero_flag_mask; + bool use_single_read; + bool use_single_write; + bool use_relaxed_mmio; + bool can_multi_write; + enum regmap_endian reg_format_endian; + enum regmap_endian val_format_endian; + const struct regmap_range_cfg *ranges; + unsigned int num_ranges; + bool use_hwlock; + bool use_raw_spinlock; + unsigned int hwlock_id; + unsigned int hwlock_mode; + bool can_sleep; +}; + +struct regmap_range_cfg { + const char *name; + unsigned int range_min; + unsigned int range_max; + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_field { + struct regmap *regmap; + unsigned int mask; + unsigned int shift; + unsigned int reg; + unsigned int id_size; + unsigned int id_offset; +}; + +struct reg_field { + unsigned int reg; + unsigned int lsb; + unsigned int msb; + unsigned int id_size; + unsigned int id_offset; +}; + +struct trace_event_data_offsets_regcache_sync { + u32 name; + u32 status; + u32 type; +}; + +struct regcache_rbtree_node { + void *block; + unsigned long *cache_present; + unsigned int base_reg; + unsigned int blklen; + struct rb_node node; +}; + +struct regcache_rbtree_ctx { + struct rb_root root; + struct regcache_rbtree_node *cached_rbnode; +}; + +struct regmap_debugfs_node { + struct regmap *map; + struct list_head link; +}; + +struct regmap_debugfs_off_cache { + struct list_head list; + off_t min; + off_t max; + unsigned int base_reg; + unsigned int max_reg; +}; + +struct devcd_entry { + struct device devcd_dev; + void *data; + size_t datalen; + struct mutex mutex; + bool delete_work; + struct module *owner; + ssize_t (*read)(char *, loff_t, size_t, void *, size_t); + void (*free)(void *); + struct delayed_work del_wk; + struct device *failing_dev; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +struct platform_msi_priv_data { 
+ struct device *dev; + void *host_data; + msi_alloc_info_t arg; + irq_write_msi_msg_t write_msg; + int devid; +}; + +typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, + const char *, size_t); + +struct trace_event_raw_devres { + struct trace_entry ent; + u32 __data_loc_devname; + struct device *dev; + const char *op; + void *node; + const char *name; + size_t size; + char __data[0]; +}; + +struct trace_event_data_offsets_devres { + u32 devname; +}; + +struct nvdimm_map { + struct nvdimm_bus *nvdimm_bus; + struct list_head list; + resource_size_t offset; + unsigned long flags; + size_t size; + union { + void *mem; + void *iomem; + }; + struct kref kref; +}; + +struct nd_device_driver { + struct device_driver drv; + unsigned long type; + int (*probe)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + void (*notify)(struct device *, enum nvdimm_event); +}; + +enum nd_async_mode { + ND_SYNC = 0, + ND_ASYNC = 1, +}; + +enum nd_ioctl_mode { + BUS_IOCTL = 0, + DIMM_IOCTL = 1, +}; + +enum nvdimm_claim_class { + NVDIMM_CCLASS_NONE = 0, + NVDIMM_CCLASS_BTT = 1, + NVDIMM_CCLASS_BTT2 = 2, + NVDIMM_CCLASS_PFN = 3, + NVDIMM_CCLASS_DAX = 4, + NVDIMM_CCLASS_UNKNOWN = 5, +}; + +enum nd_pfn_mode { + PFN_MODE_NONE = 0, + PFN_MODE_RAM = 1, + PFN_MODE_PMEM = 2, +}; + +struct nd_namespace_common { + int force_raw; + struct device dev; + struct device *claim; + enum nvdimm_claim_class claim_class; + int (*rw_bytes)(struct nd_namespace_common *, resource_size_t, void *, size_t, + int, unsigned long); +}; + +struct nd_namespace_io { + struct nd_namespace_common common; + struct resource res; + resource_size_t size; + void *addr; + struct badblocks bb; +}; + +struct badrange_entry { + u64 start; + u64 length; + struct list_head list; +}; + +struct clear_badblocks_context { + resource_size_t phys; + resource_size_t cleared; +}; + +struct nd_cmd_vendor_hdr { + __u32 opcode; + __u32 in_length; + __u8 in_buf[0]; +}; + +struct btt; + +struct nd_btt { + struct device dev; + struct nd_namespace_common *ndns; + struct btt *btt; + unsigned long lbasize; + u64 size; + uuid_t *uuid; + int id; + int initial_offset; + u16 version_major; + u16 version_minor; +}; + +struct nd_pfn_sb; + +struct nd_pfn { + int id; + uuid_t *uuid; + struct device dev; + unsigned long align; + unsigned long npfns; + enum nd_pfn_mode mode; + struct nd_pfn_sb *pfn_sb; + struct nd_namespace_common *ndns; +}; + +struct nd_pfn_sb { + u8 signature[16]; + u8 uuid[16]; + u8 parent_uuid[16]; + __le32 flags; + __le16 version_major; + __le16 version_minor; + __le64 dataoff; + __le64 npfns; + __le32 mode; + __le32 start_pad; + __le32 end_trunc; + __le32 align; + __le32 page_size; + __le16 page_struct_size; + u8 padding[3994]; + __le64 checksum; +}; + +struct nd_dax { + struct nd_pfn nd_pfn; +}; + +struct nd_label_id { + char id[50]; +}; + +struct nvdimm_pmu { + struct pmu pmu; + struct device *dev; + int cpu; + struct hlist_node node; + enum cpuhp_state cpuhp_state; + struct cpumask arch_cpumask; +}; + +enum { + ND_MAX_LANES = 256, + INT_LBASIZE_ALIGNMENT = 64, + NVDIMM_IO_ATOMIC = 1, +}; + +struct nd_namespace_label; + +struct nd_label_ent { + struct list_head list; + unsigned long flags; + struct nd_namespace_label *label; +}; + +struct nvdimm_cxl_label { + u8 type[16]; + u8 uuid[16]; + u8 name[64]; + __le32 flags; + __le16 nrange; + __le16 position; + __le64 dpa; + __le64 rawsize; + __le32 slot; + __le32 align; + u8 region_uuid[16]; + u8 abstraction_uuid[16]; + __le16 lbasize; + u8 
reserved[86]; + __le64 checksum; +}; + +struct nvdimm_efi_label { + u8 uuid[16]; + u8 name[64]; + __le32 flags; + __le16 nlabel; + __le16 position; + __le64 isetcookie; + __le64 lbasize; + __le64 dpa; + __le64 rawsize; + __le32 slot; + u8 align; + u8 reserved[3]; + guid_t type_guid; + guid_t abstraction_guid; + u8 reserved2[88]; + __le64 checksum; +}; + +struct nd_namespace_label { + union { + struct nvdimm_cxl_label cxl; + struct nvdimm_efi_label efi; + }; +}; + +struct nd_region_data { + int ns_count; + int ns_active; + unsigned int hints_shift; + void *flush_wpq[0]; +}; + +struct nd_namespace_index { + u8 sig[16]; + u8 flags[3]; + u8 labelsize; + __le32 seq; + __le64 myoff; + __le64 mysize; + __le64 otheroff; + __le64 labeloff; + __le32 nslot; + __le16 major; + __le16 minor; + __le64 checksum; + u8 free[0]; +}; + +struct conflict_context { + struct nd_region *nd_region; + resource_size_t start; + resource_size_t size; +}; + +enum { + ND_MIN_NAMESPACE_SIZE = 4096, +}; + +enum alloc_loc { + ALLOC_ERR = 0, + ALLOC_BEFORE = 1, + ALLOC_MID = 2, + ALLOC_AFTER = 3, +}; + +enum nd_label_flags { + ND_LABEL_REAP = 0, +}; + +enum { + NSINDEX_SIG_LEN = 16, + NSINDEX_ALIGN = 256, + NSINDEX_SEQ_MASK = 3, + NSLABEL_UUID_LEN = 16, + NSLABEL_NAME_LEN = 64, + NSLABEL_FLAG_ROLABEL = 1, + NSLABEL_FLAG_LOCAL = 2, + NSLABEL_FLAG_BTT = 4, + NSLABEL_FLAG_UPDATING = 8, + BTT_ALIGN = 4096, + BTTINFO_SIG_LEN = 16, + BTTINFO_UUID_LEN = 16, + BTTINFO_FLAG_ERROR = 1, + BTTINFO_MAJOR_VERSION = 1, + ND_LABEL_MIN_SIZE = 1024, + ND_LABEL_ID_SIZE = 50, + ND_NSINDEX_INIT = 1, +}; + +struct nd_namespace_pmem { + struct nd_namespace_io nsio; + unsigned long lbasize; + char *alt_name; + uuid_t *uuid; + int id; +}; + +struct btt { + struct gendisk *btt_disk; + struct list_head arena_list; + struct dentry *debugfs_dir; + struct nd_btt *nd_btt; + u64 nlba; + unsigned long long rawsize; + u32 lbasize; + u32 sector_size; + struct nd_region *nd_region; + struct mutex init_lock; + int init_state; + int num_arenas; + struct badblocks *phys_bb; +}; + +struct nd_gen_sb { + char reserved[4088]; + __le64 checksum; +}; + +struct btt_sb { + u8 signature[16]; + u8 uuid[16]; + u8 parent_uuid[16]; + __le32 flags; + __le16 version_major; + __le16 version_minor; + __le32 external_lbasize; + __le32 external_nlba; + __le32 internal_lbasize; + __le32 internal_nlba; + __le32 nfree; + __le32 infosize; + __le64 nextoff; + __le64 dataoff; + __le64 mapoff; + __le64 logoff; + __le64 info2off; + u8 padding[3968]; + __le64 checksum; +}; + +enum dax_access_mode { + DAX_ACCESS = 0, + DAX_RECOVERY_WRITE = 1, +}; + +struct dax_operations { + long (*direct_access)(struct dax_device *, unsigned long, long, + enum dax_access_mode, void **, pfn_t *); + bool (*dax_supported)(struct dax_device *, struct block_device *, int, sector_t, + sector_t); + int (*zero_page_range)(struct dax_device *, unsigned long, size_t); + size_t (*recovery_write)(struct dax_device *, unsigned long, void *, size_t, + struct iov_iter *); +}; + +struct pmem_device { + phys_addr_t phys_addr; + phys_addr_t data_offset; + u64 pfn_flags; + void *virt_addr; + size_t size; + u32 pfn_pad; + struct kernfs_node *bb_state; + struct badblocks bb; + struct dax_device *dax_dev; + struct gendisk *disk; + struct dev_pagemap pgmap; +}; + +struct dax_holder_operations; + +struct dax_device { + struct inode inode; + struct cdev cdev; + void *private; + unsigned long flags; + const struct dax_operations *ops; + void *holder_data; + const struct dax_holder_operations *holder_ops; +}; + +struct 
dax_holder_operations { + int (*notify_failure)(struct dax_device *, u64, u64, int); +}; + +enum btt_init_state { + INIT_UNCHECKED = 0, + INIT_NOTFOUND = 1, + INIT_READY = 2, +}; + +enum log_ent_request { + LOG_NEW_ENT = 0, + LOG_OLD_ENT = 1, +}; + +struct free_entry; + +struct aligned_lock; + +struct arena_info { + u64 size; + u64 external_lba_start; + u32 internal_nlba; + u32 internal_lbasize; + u32 external_nlba; + u32 external_lbasize; + u32 nfree; + u16 version_major; + u16 version_minor; + u32 sector_size; + u64 nextoff; + u64 infooff; + u64 dataoff; + u64 mapoff; + u64 logoff; + u64 info2off; + struct free_entry *freelist; + u32 *rtt; + struct aligned_lock *map_locks; + struct nd_btt *nd_btt; + struct list_head list; + struct dentry *debugfs_dir; + u32 flags; + struct mutex err_lock; + int log_index[2]; +}; + +struct free_entry { + u32 block; + u8 sub; + u8 seq; + u8 has_err; +}; + +struct aligned_lock { + union { + spinlock_t lock; + u8 cacheline_padding[64]; + }; +}; + +struct log_entry { + __le32 lba; + __le32 old_map; + __le32 new_map; + __le32 seq; +}; + +struct log_group { + struct log_entry ent[4]; +}; + +enum dax_device_flags { + DAXDEV_ALIVE = 0, + DAXDEV_WRITE_CACHE = 1, + DAXDEV_SYNC = 2, + DAXDEV_NOCACHE = 3, + DAXDEV_NOMC = 4, +}; + +enum dax_driver_type { + DAXDRV_KMEM_TYPE = 0, + DAXDRV_DEVICE_TYPE = 1, +}; + +enum id_action { + ID_REMOVE = 0, + ID_ADD = 1, +}; + +struct dax_id { + struct list_head list; + char dev_name[30]; +}; + +struct dax_region; + +struct dev_dax_range; + +struct dev_dax { + struct dax_region *region; + struct dax_device *dax_dev; + unsigned int align; + int target_node; + bool dyn_id; + int id; + struct ida ida; + struct device dev; + struct dev_pagemap *pgmap; + int nr_range; + struct dev_dax_range *ranges; +}; + +struct dax_region { + int id; + int target_node; + struct kref kref; + struct device *dev; + unsigned int align; + struct ida ida; + struct resource res; + struct device *seed; + struct device *youngest; +}; + +struct dax_mapping; + +struct dev_dax_range { + unsigned long pgoff; + struct range range; + struct dax_mapping *mapping; +}; + +struct dax_mapping { + struct device dev; + int range_id; + int id; +}; + +struct dax_device_driver { + struct device_driver drv; + struct list_head ids; + enum dax_driver_type type; + int (*probe)(struct dev_dax *); + void (*remove)(struct dev_dax *); +}; + +struct dev_dax_data { + struct dax_region *dax_region; + struct dev_pagemap *pgmap; + resource_size_t size; + int id; +}; + +struct dma_buf_list { + struct list_head head; + struct mutex lock; +}; + +enum dma_resv_usage { + DMA_RESV_USAGE_KERNEL = 0, + DMA_RESV_USAGE_WRITE = 1, + DMA_RESV_USAGE_READ = 2, + DMA_RESV_USAGE_BOOKKEEP = 3, +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + struct dma_resv_list __attribute__((btf_type_tag("rcu"))) * fences; +}; + +struct dma_fence; + +struct dma_resv_list { + struct callback_head rcu; + u32 num_fences; + u32 max_fences; + struct dma_fence __attribute__((btf_type_tag("rcu"))) * table[0]; +}; + +struct dma_buf; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct iosys_map { + union { + void *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct 
dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_buf_ops; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + unsigned int vmapping_counter; + struct iosys_map vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_in; + struct dma_buf_poll_cb_t cb_out; +}; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table *(*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, + enum dma_data_direction); + void (*release)(struct dma_buf *); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + int (*vmap)(struct dma_buf *, struct iosys_map *); + void (*vunmap)(struct dma_buf *, struct iosys_map *); +}; + +struct dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { + struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + unsigned long flags; + struct kref refcount; + int error; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char *(*get_driver_name)(struct dma_fence *); + const char *(*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long (*wait)(struct dma_fence *, bool, long); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); + void (*set_deadline)(struct dma_fence *, ktime_t); +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_import_sync_file { + __u32 flags; + __s32 fd; +}; + +struct dma_fence_unwrap { + struct dma_fence *chain; + struct dma_fence *array; + unsigned int index; +}; + +struct dma_buf_export_sync_file { + __u32 flags; + __s32 fd; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + unsigned long flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct dma_resv_iter { + struct dma_resv *obj; + enum dma_resv_usage usage; + struct dma_fence *fence; + enum dma_resv_usage fence_usage; + unsigned int index; + struct dma_resv_list *fences; + unsigned int num_fences; + bool is_restarted; +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void 
(*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + u32 timeline; +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; +}; + +struct dma_fence_chain { + struct dma_fence base; + struct dma_fence __attribute__((btf_type_tag("rcu"))) * prev; + u64 prev_seqno; + struct dma_fence *fence; + union { + struct dma_fence_cb cb; + struct irq_work work; + }; + spinlock_t lock; +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int is_wait_die; +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct udmabuf_create { + __u32 memfd; + __u32 flags; + __u64 offset; + __u64 size; +}; + +struct udmabuf_create_item { + __u32 memfd; + __u32 __pad; + __u64 offset; + __u64 size; +}; + +struct udmabuf_create_list { + __u32 flags; + __u32 count; + struct udmabuf_create_item list[0]; +}; + +struct udmabuf { + unsigned long pagecount; + struct page **pages; + struct sg_table *sg; + struct miscdevice *device; +}; + +struct drm_dmi_panel_orientation_data { + int width; + int height; + const char *const *bios_dates; + int orientation; +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + NETIF_F_LLTX_BIT = 12, + NETIF_F_NETNS_LOCAL_BIT = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + 
NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + NETIF_F_FCOE_MTU_BIT = 37, + NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 43, + NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETIF_F_GRO_UDP_FWD_BIT = 59, + NETIF_F_HW_HSR_TAG_INS_BIT = 60, + NETIF_F_HW_HSR_TAG_RM_BIT = 61, + NETIF_F_HW_HSR_FWD_BIT = 62, + NETIF_F_HW_HSR_DUP_BIT = 63, + NETDEV_FEATURE_COUNT = 64, +}; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_HW_TSTAMP_USE_CYCLES = 8, + SKBTX_WIFI_STATUS = 16, + SKBTX_HW_TSTAMP_NETDEV = 32, + SKBTX_SCHED_TSTAMP = 64, +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; +}; + +struct napi_struct; + +struct page_pool_params { + unsigned int flags; + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; + void (*init_callback)(struct page *, void *); + void *init_arg; +}; + +struct page_pool_alloc_stats { + u64 fast; + u64 slow; + u64 slow_high_order; + u64 empty; + u64 refill; + u64 waive; +}; + +struct pp_alloc_cache { + u32 count; + struct page *cache[128]; +}; + +struct page_pool_recycle_stats; + +struct page_pool { + struct page_pool_params p; + long frag_users; + struct page *frag_page; + unsigned int frag_offset; + u32 pages_state_hold_cnt; + struct delayed_work release_dw; + void (*disconnect)(void *); + unsigned long defer_start; + unsigned long defer_warn; + struct page_pool_alloc_stats alloc_stats; + u32 xdp_mem_id; + struct pp_alloc_cache alloc; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct ptr_ring ring; + struct page_pool_recycle_stats __attribute__((btf_type_tag("percpu"))) * recycle_stats; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct gro_list { + struct list_head list; + int count; +}; + +struct napi_struct { + struct list_head poll_list; + unsigned long state; + int weight; + int defer_hard_irqs_count; + unsigned long gro_bitmask; + int (*poll)(struct napi_struct *, int); + int list_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + unsigned int napi_id; + struct hrtimer timer; + struct task_struct *thread; + struct list_head dev_list; + struct hlist_node napi_hash_node; +}; + +struct page_pool_recycle_stats { + u64 cached; + u64 cache_full; + u64 ring; + u64 ring_full; + u64 released_refcnt; +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + netdevice_tracker nhc_dev_tracker; + int nhc_oif; + unsigned char nhc_scope; + u8 nhc_family; + u8 
nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable + __attribute__((btf_type_tag("rcu"))) *__attribute__((btf_type_tag("percp" + "u"))) * + nhc_pcpu_rth_output; + struct rtable __attribute__((btf_type_tag("rcu"))) * nhc_rth_input; + struct fnhe_hash_bucket __attribute__((btf_type_tag("rcu"))) * nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + unsigned long last_probe; + struct rt6_info *__attribute__((btf_type_tag("percpu"))) * rt6i_pcpu; + struct rt6_exception_bucket __attribute__((btf_type_tag("rcu"))) * rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info __attribute__((btf_type_tag("rcu"))) * fib6_next; + struct fib6_node __attribute__((btf_type_tag("rcu"))) * fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + unsigned long expires; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 offload; + u8 trap; + u8 offload_failed; + u8 should_flush : 1; + u8 dst_nocount : 1; + u8 dst_nopolicy : 1; + u8 fib6_destroying : 1; + u8 unused : 4; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct fib6_node { + struct fib6_node __attribute__((btf_type_tag("rcu"))) * parent; + struct fib6_node __attribute__((btf_type_tag("rcu"))) * left; + struct fib6_node __attribute__((btf_type_tag("rcu"))) * right; + struct fib6_node __attribute__((btf_type_tag("rcu"))) * subtree; + struct fib6_info __attribute__((btf_type_tag("rcu"))) * leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info __attribute__((btf_type_tag("rcu"))) * rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; +}; + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +struct rtable { + struct dst_entry dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked : 1; + u32 rt_pmtu : 31; +}; + +struct fib_nh_exception; + +struct fnhe_hash_bucket { + struct fib_nh_exception __attribute__((btf_type_tag("rcu"))) * chain; +}; + +struct fib_nh_exception { + struct fib_nh_exception __attribute__((btf_type_tag("rcu"))) * fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + unsigned long fnhe_expires; + struct rtable __attribute__((btf_type_tag("rcu"))) * fnhe_rth_input; + struct rtable __attribute__((btf_type_tag("rcu"))) * fnhe_rth_output; + unsigned long fnhe_stamp; + struct callback_head rcu; +}; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info __attribute__((btf_type_tag("rcu"))) * from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + unsigned short rt6i_nfheader_len; +}; + +struct rt6_exception_bucket { + 
struct hlist_head chain; + int depth; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; +}; + +struct mii_timestamping_ctrl; + +struct mii_timestamping_desc { + struct list_head list; + struct mii_timestamping_ctrl *ctrl; + struct device *device; +}; + +struct mii_timestamper; + +struct mii_timestamping_ctrl { + struct mii_timestamper *(*probe_channel)(struct device *, unsigned int); + void (*release_channel)(struct device *, struct mii_timestamper *); +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct ifreq *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct ethtool_ts_info *); + struct device *device; +}; + +struct failover_ops { + int (*slave_pre_register)(struct net_device *, struct net_device *); + int (*slave_register)(struct net_device *, struct net_device *); + int (*slave_pre_unregister)(struct net_device *, struct net_device *); + int (*slave_unregister)(struct net_device *, struct net_device *); + int (*slave_link_change)(struct net_device *, struct net_device *); + int (*slave_name_change)(struct net_device *, struct net_device *); + rx_handler_result_t (*slave_handle_frame)(struct sk_buff **); +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + __LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +struct failover { + struct list_head list; + struct net_device __attribute__((btf_type_tag("rcu"))) * failover_dev; + netdevice_tracker dev_tracker; + struct failover_ops __attribute__((btf_type_tag("rcu"))) * ops; +}; + +struct netdev_lag_lower_state_info { + u8 link_up : 1; + u8 tx_enabled : 1; +}; + +struct net_failover_info { + struct net_device __attribute__((btf_type_tag("rcu"))) * primary_dev; + struct net_device __attribute__((btf_type_tag("rcu"))) * standby_dev; + struct rtnl_link_stats64 primary_stats; + struct rtnl_link_stats64 standby_stats; + struct rtnl_link_stats64 failover_stats; + spinlock_t stats_lock; +}; + +struct udp_hslot; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot *hash2; + unsigned int mask; + unsigned int log; +}; + +struct udp_hslot { + struct hlist_head head; + int count; + spinlock_t lock; +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); +}; + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_ERROR = 3, + PHY_UP = 4, + PHY_RUNNING = 5, + PHY_NOLINK = 6, + PHY_CABLETEST = 7, +}; + +struct phylink; + +struct 
pse_control; + +struct phy_driver; + +struct phy_package_shared; + +struct phy_device { + struct mdio_device mdio; + struct phy_driver *drv; + struct device_link *devlink; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45 : 1; + unsigned int is_internal : 1; + unsigned int is_pseudo_fixed_link : 1; + unsigned int is_gigabit_capable : 1; + unsigned int has_fixups : 1; + unsigned int suspended : 1; + unsigned int suspended_by_mdio_bus : 1; + unsigned int sysfs_links : 1; + unsigned int loopback_enabled : 1; + unsigned int downshifted_rate : 1; + unsigned int is_on_sfp_module : 1; + unsigned int mac_managed_pm : 1; + unsigned int wol_enabled : 1; + unsigned int autoneg : 1; + unsigned int link : 1; + unsigned int autoneg_complete : 1; + unsigned int interrupts : 1; + unsigned int irq_suspended : 1; + unsigned int irq_rerun : 1; + int rate_matching; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + int speed; + int duplex; + int port; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + unsigned long supported[2]; + unsigned long advertising[2]; + unsigned long lp_advertising[2]; + unsigned long adv_old[2]; + unsigned long supported_eee[2]; + unsigned long advertising_eee[2]; + bool eee_enabled; + unsigned long host_interfaces[1]; + u32 eee_broken_modes; + struct list_head leds; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + struct pse_control *psec; + u8 mdix; + u8 mdix_ctrl; + int pma_extable; + unsigned int link_down_events; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); +}; + +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*read_c45)(struct mii_bus *, int, int, int); + int (*write_c45)(struct mii_bus *, int, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; +}; + +struct phy_package_shared { + int addr; + refcount_t refcnt; + unsigned long flags; + size_t priv_size; + void *priv; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +struct phy_tdr_config; + +struct phy_plca_cfg; + +struct phy_plca_status; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const unsigned long *const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*get_rate_matching)(struct 
phy_device *, phy_interface_t); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*config_intr)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); + void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); + int (*led_blink_set)(struct phy_device *, u8, unsigned long *, unsigned long *); + int (*led_hw_is_supported)(struct phy_device *, u8, unsigned long); + int (*led_hw_control_set)(struct phy_device *, u8, unsigned long); + int (*led_hw_control_get)(struct phy_device *, u8, unsigned long *); +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +struct phy_plca_cfg { + int version; + int enabled; + int node_id; + int node_cnt; + int to_tmr; + int burst_cnt; + int burst_tmr; +}; + +struct phy_plca_status { + bool pst; +}; + +struct ib_device; + +struct devlink; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, + DEVLINK_PORT_FLAVOUR_PCI_SF = 7, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external : 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 controller; + u16 pf; + u16 vf; + u8 external : 1; +}; + +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; + u8 external : 1; +}; + +struct devlink_port_attrs { + u8 split : 1; + u8 splittable : 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct 
devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + struct devlink_port_pci_sf_attrs pci_sf; + }; +}; + +struct devlink_linecard; + +struct devlink_port_ops; + +struct devlink_rate; + +struct devlink_port { + struct list_head list; + struct list_head region_list; + struct devlink *devlink; + const struct devlink_port_ops *ops; + unsigned int index; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + union { + struct { + struct net_device *netdev; + int ifindex; + char ifname[16]; + } type_eth; + struct { + struct ib_device *ibdev; + } type_ib; + }; + struct devlink_port_attrs attrs; + u8 attrs_set : 1; + u8 switch_port : 1; + u8 registered : 1; + u8 initialized : 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct devlink_rate *devlink_rate; + struct devlink_linecard *linecard; + u32 rel_index; +}; + +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE = 0, + DEVLINK_PORT_FN_STATE_ACTIVE = 1, +}; + +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, +}; + +struct devlink_port_ops { + int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, + struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, + struct netlink_ext_ack *); + int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, + struct netlink_ext_ack *); + int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, + enum devlink_port_fn_opstate *, struct netlink_ext_ack *); + int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, + struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, + struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, + struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_set)(struct devlink_port *, bool, struct netlink_ext_ack *); +}; + +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF = 0, + DEVLINK_RATE_TYPE_NODE = 1, +}; + +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + u64 tx_share; + u64 tx_max; + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; + u32 tx_priority; + u32 tx_weight; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info __attribute__((btf_type_tag("rcu"))) * nh_info; + struct nh_group __attribute__((btf_type_tag("rcu"))) * 
nh_grp; + }; +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + refcount_t fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + bool pfsrc_removed; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_grp_entry { + struct nexthop *nh; + u8 weight; + union { + struct { + atomic_t upper_bound; + } hthr; + struct { + struct list_head uw_nh_entry; + u16 count_buckets; + u16 wants_buckets; + } res; + }; + struct list_head nh_list; + struct nexthop *nh_parent; +}; + +struct nh_res_table; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool is_multipath; + bool hash_threshold; + bool resilient; + bool fdb_nh; + bool has_v4; + struct nh_res_table __attribute__((btf_type_tag("rcu"))) * res_table; + struct nh_grp_entry nh_entries[0]; +}; + +struct nh_res_bucket { + struct nh_grp_entry __attribute__((btf_type_tag("rcu"))) * nh_entry; + atomic_long_t used_time; + unsigned long migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + struct list_head uw_nh_entries; + unsigned long unbalanced_since; + u32 idle_timer; + u32 unbalanced_timer; + u16 num_nh_buckets; + struct nh_res_bucket nh_buckets[0]; +}; + +typedef __u16 __sum16; + +struct iphdr { + __u8 ihl : 4; + __u8 version : 4; + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + union { + struct { + __be32 saddr; + __be32 daddr; + }; + struct { + __be32 saddr; + __be32 daddr; + } addrs; + }; +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +struct in_ifaddr { + struct hlist_node hash; + struct in_ifaddr __attribute__((btf_type_tag("rcu"))) * ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + unsigned char ifa_proto; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + unsigned long ifa_cstamp; + unsigned long ifa_tstamp; +}; + +struct ip_sf_list; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + unsigned long sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list __attribute__((btf_type_tag("rcu"))) * next_rcu; + }; + struct ip_mc_list __attribute__((btf_type_tag("rcu"))) * next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char 
gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct rps_map; + +struct rps_dev_flow_table; + +struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; + struct rps_map __attribute__((btf_type_tag("rcu"))) * rps_map; + struct rps_dev_flow_table __attribute__((btf_type_tag("rcu"))) * rps_flow_table; + struct kobject kobj; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct xsk_buff_pool *pool; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +struct tcf_walker { + int stop; + int skip; + int count; + bool nonempty; + unsigned long cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +struct tcf_exts { + int action; + int police; +}; + +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + +struct ohci { + void *registers; +}; + +enum amd_chipset_gen { + NOT_AMD_CHIPSET = 0, + AMD_CHIPSET_SB600 = 1, + AMD_CHIPSET_SB700 = 2, + AMD_CHIPSET_SB800 = 3, + AMD_CHIPSET_HUDSON2 = 4, + AMD_CHIPSET_BOLTON = 5, + AMD_CHIPSET_YANGTZE = 6, + AMD_CHIPSET_TAISHAN = 7, + AMD_CHIPSET_UNKNOWN = 8, +}; + +struct amd_chipset_type { + enum amd_chipset_gen gen; + u8 rev; +}; + +struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; + int nb_type; + struct amd_chipset_type sb_type; + int isoc_reqs; + int probe_count; + bool need_pll_quirk; +}; + +struct ehci_caps { + u32 hc_capbase; + u32 hcs_params; + u32 hcc_params; + u8 portroute[8]; +}; + +struct ehci_regs { + u32 command; + u32 status; + u32 intr_enable; + u32 frame_index; + u32 segment; + u32 frame_list; + u32 async_next; + u32 reserved1[2]; + u32 txfill_tuning; + u32 reserved2[6]; + u32 configured_flag; + union { + u32 port_status[15]; + struct { + u32 reserved3[9]; + u32 usbmode; + }; + }; + union { + struct { + u32 reserved4; + u32 hostpc[15]; + }; + u32 brcm_insnreg[4]; + }; + u32 reserved5[2]; + u32 usbmode_ex; +}; + +struct ehci_dbg_port { + u32 control; + u32 pids; + u32 data03; + u32 data47; + u32 address; +}; + +struct ehci_dev { + u32 bus; + u32 slot; + u32 func; +}; + +typedef void (*set_debug_port_t)(int); + +struct usb_hcd; + +struct usb_debug_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDebugInEndpoint; + __u8 bDebugOutEndpoint; +}; + +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +union input_seq_state { + struct { + unsigned short pos; + bool mutex_acquired; + }; + void *p; +}; + +struct input_devres { + struct input_dev *input; +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + struct input_dev *input; + struct delayed_work work; +}; + +struct ff_periodic_effect_compat { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 
custom_len; + compat_uptr_t custom_data; +}; + +struct ff_effect_compat { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect_compat periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_event_compat { + compat_ulong_t sec; + compat_ulong_t usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +struct ml_effect_state { + struct ff_effect *effect; + unsigned long flags; + int count; + unsigned long play_at; + unsigned long stop_at; + unsigned long adj_at; +}; + +struct ml_device { + void *private; + struct ml_effect_state states[16]; + int gain; + struct timer_list timer; + struct input_dev *dev; + int (*play_effect)(struct input_dev *, void *, struct ff_effect *); +}; + +struct input_led { + struct led_classdev cdev; + struct input_handle *handle; + unsigned int code; +}; + +struct led_init_data { + struct fwnode_handle *fwnode; + const char *default_label; + const char *devicename; + bool devname_mandatory; +}; + +struct input_leds { + struct input_handle handle; + unsigned int num_leds; + struct input_led leds[0]; +}; + +struct evdev; + +struct evdev_client { + unsigned int head; + unsigned int tail; + unsigned int packet_head; + spinlock_t buffer_lock; + wait_queue_head_t wait; + struct fasync_struct *fasync; + struct evdev *evdev; + struct list_head node; + struct callback_head rcu; + enum input_clock_type clk_type; + bool revoked; + unsigned long *evmasks[32]; + unsigned int bufsize; + struct input_event buffer[0]; +}; + +struct evdev { + int open; + struct input_handle handle; + struct evdev_client __attribute__((btf_type_tag("rcu"))) * grab; + struct list_head client_list; + spinlock_t client_lock; + struct mutex mutex; + struct device dev; + struct cdev cdev; + bool exist; +}; + +struct input_mask { + __u32 type; + __u32 codes_size; + __u64 codes_ptr; +}; + +enum uinput_state { + UIST_NEW_DEVICE = 0, + UIST_SETUP_COMPLETE = 1, + UIST_CREATED = 2, +}; + +struct uinput_request; + +struct uinput_device { + struct input_dev *dev; + struct mutex mutex; + enum uinput_state state; + wait_queue_head_t waitq; + unsigned char ready; + unsigned char head; + unsigned char tail; + struct input_event buff[16]; + unsigned int ff_effects_max; + struct uinput_request *requests[16]; + wait_queue_head_t requests_waitq; + spinlock_t requests_lock; +}; + +struct uinput_request { + unsigned int id; + unsigned int code; + int retval; + struct completion done; + union { + unsigned int effect_id; + struct { + struct ff_effect *effect; + struct ff_effect *old; + } upload; + } u; +}; + +struct uinput_user_dev { + char name[80]; + struct input_id id; + __u32 ff_effects_max; + __s32 absmax[64]; + __s32 absmin[64]; + __s32 absfuzz[64]; + __s32 absflat[64]; +}; + +struct uinput_ff_upload { + __u32 request_id; + __s32 retval; + struct ff_effect effect; + struct ff_effect old; +}; + +struct uinput_ff_erase { + __u32 request_id; + __s32 retval; + __u32 effect_id; +}; + +struct uinput_setup { + struct input_id id; + char name[80]; + __u32 ff_effects_max; +}; + +struct uinput_ff_upload_compat { + __u32 
request_id; + __s32 retval; + struct ff_effect_compat effect; + struct ff_effect_compat old; +}; + +struct uinput_abs_setup { + __u16 code; + struct input_absinfo absinfo; +}; + +typedef void (*btf_trace_rtc_set_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_set_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_irq_set_freq)(void *, int, int); + +typedef void (*btf_trace_rtc_irq_set_state)(void *, int, int); + +typedef void (*btf_trace_rtc_alarm_irq_enable)(void *, unsigned int, int); + +typedef void (*btf_trace_rtc_set_offset)(void *, long, int); + +typedef void (*btf_trace_rtc_read_offset)(void *, long, int); + +typedef void (*btf_trace_rtc_timer_enqueue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_dequeue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_fired)(void *, struct rtc_timer *); + +enum { + none = 0, + day = 1, + month = 2, + year = 3, +}; + +struct trace_event_raw_rtc_time_alarm_class { + struct trace_entry ent; + time64_t secs; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_freq { + struct trace_entry ent; + int freq; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_state { + struct trace_entry ent; + int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_alarm_irq_enable { + struct trace_entry ent; + unsigned int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_offset_class { + struct trace_entry ent; + long offset; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_timer_class { + struct trace_entry ent; + struct rtc_timer *timer; + ktime_t expires; + ktime_t period; + char __data[0]; +}; + +struct trace_event_data_offsets_rtc_time_alarm_class {}; + +struct trace_event_data_offsets_rtc_irq_set_freq {}; + +struct trace_event_data_offsets_rtc_irq_set_state {}; + +struct trace_event_data_offsets_rtc_alarm_irq_enable {}; + +struct trace_event_data_offsets_rtc_offset_class {}; + +struct trace_event_data_offsets_rtc_timer_class {}; + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, + NVMEM_TYPE_FRAM = 4, +}; + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +struct nvmem_cell_info; + +struct nvmem_keepout; + +struct nvmem_layout; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool add_legacy_fixed_of_cells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + enum nvmem_type type; + bool read_only; + bool root_only; + bool ignore_wp; + struct nvmem_layout *layout; + struct device_node *of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, + size_t); + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + size_t raw_len; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; + struct device_node *np; + nvmem_cell_post_process_t read_post_process; + void *priv; +}; + +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char 
value; +}; + +struct nvmem_device; + +struct nvmem_layout { + const char *name; + const struct of_device_id *of_match_table; + int (*add_cells)(struct device *, struct nvmem_device *, struct nvmem_layout *); + void (*fixup_cell_info)(struct nvmem_device *, struct nvmem_layout *, + struct nvmem_cell_info *); + struct module *owner; + struct list_head node; +}; + +struct mc146818_get_time_callback_param { + struct rtc_time *time; + unsigned char ctrl; + unsigned char century; +}; + +struct i2c_board_info { + char type[20]; + unsigned short flags; + unsigned short addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct software_node *swnode; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +struct i2c_adapter; + +struct i2c_msg; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, + const struct i2c_msg *, int); + +struct regulator; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + struct device dev; + unsigned long locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; +}; + +union i2c_smbus_data; + +struct i2c_algorithm { + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*smbus_xfer)(struct i2c_adapter *, u16, unsigned short, char, u8, int, + union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, unsigned short, char, u8, int, + union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct pinctrl; + +struct pinctrl_state; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; +}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; +}; + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, + const struct i2c_msg *, int); + +typedef void 
(*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, + const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_client; + +struct i2c_device_id; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *); + void (*remove)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const unsigned short *address_list; + struct list_head clients; + u32 flags; +}; + +struct i2c_client { + unsigned short flags; + unsigned short addr; + char name[20]; + struct i2c_adapter *adapter; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + void *devres_group_id; +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; +}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_result {}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, unsigned short, + char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, + unsigned short, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, unsigned short, + char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, + unsigned short, char, u8, int, int); + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + 
struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +enum i2c_driver_flags { + I2C_DRV_ACPI_WAIVE_D0_PROBE = 1, +}; + +struct gsb_buffer { + u8 status; + u8 len; + union { + u16 wdata; + u8 bdata; + struct { + struct { + } __empty_data; + u8 data[0]; + }; + }; +}; + +struct i2c_acpi_irq_context { + int irq; + bool wake_capable; +}; + +struct i2c_acpi_lookup { + struct i2c_board_info *info; + acpi_handle adapter_handle; + acpi_handle device_handle; + acpi_handle search_handle; + int n; + int index; + u32 speed; + u32 min_speed; + u32 force_speed; +}; + +struct i2c_acpi_handler_data { + struct acpi_connection_info info; + struct i2c_adapter *adapter; +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +struct psy_get_supplier_prop_data { + struct power_supply *psy; + enum power_supply_property psp; + union power_supply_propval *val; +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char *const *text_values; + int text_values_len; +}; + +enum power_supply_charge_behaviour { + POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, + POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, + POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, +}; + +struct hwmon_ops; + +struct hwmon_channel_info; + +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info *const *info; +}; + +enum hwmon_sensor_types { + hwmon_chip = 0, + hwmon_temp = 1, + hwmon_in = 2, + hwmon_curr = 3, + hwmon_power = 4, + hwmon_energy = 5, + hwmon_humidity = 6, + hwmon_fan = 7, + hwmon_pwm = 8, + hwmon_intrusion = 9, + hwmon_max = 10, +}; + +struct hwmon_ops { + umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); + int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long *); + int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); + int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long); +}; + +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +enum hwmon_temp_attributes { + hwmon_temp_enable = 0, + hwmon_temp_input = 1, + hwmon_temp_type = 2, + hwmon_temp_lcrit = 3, + hwmon_temp_lcrit_hyst = 4, + hwmon_temp_min = 5, + hwmon_temp_min_hyst = 6, + hwmon_temp_max = 7, + hwmon_temp_max_hyst = 8, + hwmon_temp_crit = 9, + hwmon_temp_crit_hyst = 10, + hwmon_temp_emergency = 11, + hwmon_temp_emergency_hyst = 12, + hwmon_temp_alarm = 13, + hwmon_temp_lcrit_alarm = 14, + hwmon_temp_min_alarm = 15, + hwmon_temp_max_alarm = 16, + hwmon_temp_crit_alarm = 17, + hwmon_temp_emergency_alarm = 18, + hwmon_temp_fault = 19, + hwmon_temp_offset = 20, + hwmon_temp_label = 21, + hwmon_temp_lowest = 22, + hwmon_temp_highest = 23, + hwmon_temp_reset_history = 24, + hwmon_temp_rated_min = 25, + hwmon_temp_rated_max = 26, + hwmon_temp_beep = 27, +}; + +enum hwmon_in_attributes { + hwmon_in_enable = 
0, + hwmon_in_input = 1, + hwmon_in_min = 2, + hwmon_in_max = 3, + hwmon_in_lcrit = 4, + hwmon_in_crit = 5, + hwmon_in_average = 6, + hwmon_in_lowest = 7, + hwmon_in_highest = 8, + hwmon_in_reset_history = 9, + hwmon_in_label = 10, + hwmon_in_alarm = 11, + hwmon_in_min_alarm = 12, + hwmon_in_max_alarm = 13, + hwmon_in_lcrit_alarm = 14, + hwmon_in_crit_alarm = 15, + hwmon_in_rated_min = 16, + hwmon_in_rated_max = 17, + hwmon_in_beep = 18, +}; + +enum hwmon_curr_attributes { + hwmon_curr_enable = 0, + hwmon_curr_input = 1, + hwmon_curr_min = 2, + hwmon_curr_max = 3, + hwmon_curr_lcrit = 4, + hwmon_curr_crit = 5, + hwmon_curr_average = 6, + hwmon_curr_lowest = 7, + hwmon_curr_highest = 8, + hwmon_curr_reset_history = 9, + hwmon_curr_label = 10, + hwmon_curr_alarm = 11, + hwmon_curr_min_alarm = 12, + hwmon_curr_max_alarm = 13, + hwmon_curr_lcrit_alarm = 14, + hwmon_curr_crit_alarm = 15, + hwmon_curr_rated_min = 16, + hwmon_curr_rated_max = 17, + hwmon_curr_beep = 18, +}; + +struct power_supply_hwmon { + struct power_supply *psy; + unsigned long *props; +}; + +typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long); + +typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long); + +typedef void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); + +enum hwmon_chip_attributes { + hwmon_chip_temp_reset_history = 0, + hwmon_chip_in_reset_history = 1, + hwmon_chip_curr_reset_history = 2, + hwmon_chip_power_reset_history = 3, + hwmon_chip_register_tz = 4, + hwmon_chip_update_interval = 5, + hwmon_chip_alarms = 6, + hwmon_chip_samples = 7, + hwmon_chip_curr_samples = 8, + hwmon_chip_in_samples = 9, + hwmon_chip_power_samples = 10, + hwmon_chip_temp_samples = 11, + hwmon_chip_beep_enable = 12, +}; + +enum hwmon_power_attributes { + hwmon_power_enable = 0, + hwmon_power_average = 1, + hwmon_power_average_interval = 2, + hwmon_power_average_interval_max = 3, + hwmon_power_average_interval_min = 4, + hwmon_power_average_highest = 5, + hwmon_power_average_lowest = 6, + hwmon_power_average_max = 7, + hwmon_power_average_min = 8, + hwmon_power_input = 9, + hwmon_power_input_highest = 10, + hwmon_power_input_lowest = 11, + hwmon_power_reset_history = 12, + hwmon_power_accuracy = 13, + hwmon_power_cap = 14, + hwmon_power_cap_hyst = 15, + hwmon_power_cap_max = 16, + hwmon_power_cap_min = 17, + hwmon_power_min = 18, + hwmon_power_max = 19, + hwmon_power_crit = 20, + hwmon_power_lcrit = 21, + hwmon_power_label = 22, + hwmon_power_alarm = 23, + hwmon_power_cap_alarm = 24, + hwmon_power_min_alarm = 25, + hwmon_power_max_alarm = 26, + hwmon_power_lcrit_alarm = 27, + hwmon_power_crit_alarm = 28, + hwmon_power_rated_min = 29, + hwmon_power_rated_max = 30, +}; + +enum hwmon_energy_attributes { + hwmon_energy_enable = 0, + hwmon_energy_input = 1, + hwmon_energy_label = 2, +}; + +enum hwmon_humidity_attributes { + hwmon_humidity_enable = 0, + hwmon_humidity_input = 1, + hwmon_humidity_label = 2, + hwmon_humidity_min = 3, + hwmon_humidity_min_hyst = 4, + hwmon_humidity_max = 5, + hwmon_humidity_max_hyst = 6, + hwmon_humidity_alarm = 7, + hwmon_humidity_fault = 8, + hwmon_humidity_rated_min = 9, + hwmon_humidity_rated_max = 10, +}; + +enum hwmon_fan_attributes { + hwmon_fan_enable = 0, + hwmon_fan_input = 1, + hwmon_fan_label = 2, + hwmon_fan_min = 3, + hwmon_fan_max = 4, + hwmon_fan_div = 5, + hwmon_fan_pulses = 6, + hwmon_fan_target = 7, + hwmon_fan_alarm = 8, + hwmon_fan_min_alarm = 9, + hwmon_fan_max_alarm = 10, + hwmon_fan_fault = 11, + hwmon_fan_beep = 12, 
+}; + +struct trace_event_raw_hwmon_attr_class { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + long val; + char __data[0]; +}; + +struct trace_event_raw_hwmon_attr_show_string { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + u32 __data_loc_label; + char __data[0]; +}; + +struct hwmon_device_attribute { + struct device_attribute dev_attr; + const struct hwmon_ops *ops; + enum hwmon_sensor_types type; + u32 attr; + int index; + char name[32]; +}; + +struct hwmon_device { + const char *name; + const char *label; + struct device dev; + const struct hwmon_chip_info *chip; + struct list_head tzdata; + struct attribute_group group; + const struct attribute_group **groups; +}; + +struct trace_event_data_offsets_hwmon_attr_class { + u32 attr_name; +}; + +struct trace_event_data_offsets_hwmon_attr_show_string { + u32 attr_name; + u32 label; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, unsigned long); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, + enum thermal_trip_type); + +struct thermal_instance { + int id; + char name[20]; + struct thermal_zone_device *tz; + struct thermal_cooling_device *cdev; + const struct thermal_trip *trip; + bool initialized; + unsigned long upper; + unsigned long lower; + unsigned long target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head tz_node; + struct list_head cdev_node; + unsigned int weight; + bool upper_no_limit; +}; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + unsigned long target; + char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; +}; + +struct trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; +}; + +struct cooling_dev_stats { + spinlock_t lock; + unsigned int total_trans; + unsigned long state; + ktime_t last_time; + ktime_t *time_in_state; + unsigned int *trans_table; +}; + +struct thermal_hwmon_device { + char type[20]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; + struct thermal_hwmon_attr temp_crit; +}; + +struct mddev; + +struct md_rdev; + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, 
sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + unsigned long flags; + unsigned long sb_flags; + int suspended; + struct mutex suspend_mutex; + struct percpu_ref active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + __u64 events; + int can_decrease_events; + char uuid[16]; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread __attribute__((btf_type_tag("rcu"))) * thread; + struct md_thread __attribute__((btf_type_tag("rcu"))) * sync_thread; + char *last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + unsigned long resync_mark; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + unsigned long recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node *sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + struct work_struct sync_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + struct request_queue *queue; + struct bitmap *bitmap; + struct { + struct file *file; + loff_t offset; + unsigned long space; + loff_t default_offset; + unsigned long default_space; + struct mutex mutex; + unsigned long chunksize; + unsigned long daemon_sleep; + unsigned long max_write_behind; + int external; + int nodes; + char cluster_name[64]; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + const struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + struct bio_set io_clone_set; + struct bio *flush_bio; + atomic_t flush_pending; + ktime_t start_flush; + ktime_t prev_flush_start; + struct work_struct flush_work; + struct work_struct event_work; + mempool_t *serial_info_pool; + void 
(*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + struct list_head deleting; + struct mutex sync_mutex; + atomic_t sync_seq; + bool has_superblocks : 1; + bool fail_last_dev : 1; + bool serialize_policy : 1; +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev *); + void (*finish_reshape)(struct mddev *); + void (*update_reshape_pos)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void *(*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct bdev_handle *bdev_handle; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + unsigned long flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct kernfs_node *sysfs_badblocks; + struct badblocks badblocks; + struct { + short offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + unsigned long flags; + struct task_struct *tsk; + unsigned long timeout; + void *private; +}; + +struct bitmap_page; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + unsigned long pages; + unsigned long missing_pages; + unsigned long chunkshift; + unsigned long chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + unsigned long sb_index; + struct page **filemap; + unsigned long *filemap_attr; + unsigned long file_pages; + unsigned long bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + unsigned long flags; + int allclean; + atomic_t behind_writes; + unsigned long behind_writes_used; + unsigned long daemon_lastrun; + unsigned long last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int 
cluster_slot; +}; + +struct bitmap_page { + char *map; + unsigned int hijacked : 1; + unsigned int pending : 1; + unsigned int count : 30; +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + unsigned long long (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, unsigned long long); +}; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + RemoveSynchronized = 14, + ExternalBbl = 15, + FailFast = 16, + LastDev = 17, + CollisionCheck = 18, +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_NOT_READY = 8, + MD_BROKEN = 9, + MD_DELETED = 10, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +enum recovery_flags { + MD_RECOVERY_RUNNING = 0, + MD_RECOVERY_SYNC = 1, + MD_RECOVERY_RECOVER = 2, + MD_RECOVERY_INTR = 3, + MD_RECOVERY_DONE = 4, + MD_RECOVERY_NEEDED = 5, + MD_RECOVERY_REQUESTED = 6, + MD_RECOVERY_CHECK = 7, + MD_RECOVERY_RESHAPE = 8, + MD_RECOVERY_FROZEN = 9, + MD_RECOVERY_ERROR = 10, + MD_RECOVERY_WAIT = 11, + MD_RESYNCING_REMOTE = 12, +}; + +enum md_ro_state { + MD_RDWR = 0, + MD_RDONLY = 1, + MD_AUTO_READ = 2, + MD_MAX_STATE = 3, +}; + +enum { + MD_RESYNC_NONE = 0, + MD_RESYNC_YIELDED = 1, + MD_RESYNC_DELAYED = 2, + MD_RESYNC_ACTIVE = 3, +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +struct md_io_clone { + struct mddev *mddev; + struct bio *orig_bio; + unsigned long start_time; + struct bio bio_clone; +}; + +struct mdp_superblock_1 { + __le32 magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + __u8 bblog_shift; + __le16 bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct 
mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +typedef struct mdu_array_info_s mdu_array_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_lo; + __u32 events_hi; + __u32 cp_events_lo; + __u32 cp_events_hi; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +typedef __u16 bitmap_counter_t; + +struct bitmap_unplug_work { + struct work_struct work; + struct bitmap *bitmap; + struct completion *done; +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +struct cpufreq_policy_data; + +struct freq_attr; + +struct cpufreq_driver { + char name[16]; + u16 flags; + void *driver_data; + int (*init)(struct cpufreq_policy *); + int (*verify)(struct cpufreq_policy_data *); + int (*setpolicy)(struct cpufreq_policy *); + int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); + int (*target_index)(struct cpufreq_policy *, unsigned int); + unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); + void (*adjust_perf)(unsigned int, unsigned long, unsigned long, unsigned long); + unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); + int (*target_intermediate)(struct cpufreq_policy *, unsigned 
int); + unsigned int (*get)(unsigned int); + void (*update_limits)(unsigned int); + int (*bios_limit)(int, unsigned int *); + int (*online)(struct cpufreq_policy *); + int (*offline)(struct cpufreq_policy *); + int (*exit)(struct cpufreq_policy *); + int (*suspend)(struct cpufreq_policy *); + int (*resume)(struct cpufreq_policy *); + void (*ready)(struct cpufreq_policy *); + struct freq_attr **attr; + bool boost_enabled; + int (*set_boost)(struct cpufreq_policy *, int); + void (*register_em)(struct cpufreq_policy *); +}; + +struct cpufreq_policy_data { + struct cpufreq_cpuinfo cpuinfo; + struct cpufreq_frequency_table *freq_table; + unsigned int cpu; + unsigned int min; + unsigned int max; +}; + +struct freq_attr { + struct attribute attr; + ssize_t (*show)(struct cpufreq_policy *, char *); + ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); +}; + +struct cpufreq_stats { + unsigned int total_trans; + unsigned long long last_time; + unsigned int max_state; + unsigned int state_num; + unsigned int last_index; + u64 *time_in_state; + unsigned int *freq_table; + unsigned int *trans_table; + unsigned int reset_pending; + unsigned long long reset_time; +}; + +struct userspace_policy { + unsigned int is_managed; + unsigned int setspeed; + struct mutex mutex; +}; + +struct od_ops { + unsigned int (*powersave_bias_target)(struct cpufreq_policy *, unsigned int, + unsigned int); +}; + +struct dbs_data; + +struct policy_dbs_info; + +struct dbs_governor { + struct cpufreq_governor gov; + struct kobj_type kobj_type; + struct dbs_data *gdbs_data; + unsigned int (*gov_dbs_update)(struct cpufreq_policy *); + struct policy_dbs_info *(*alloc)(); + void (*free)(struct policy_dbs_info *); + int (*init)(struct dbs_data *); + void (*exit)(struct dbs_data *); + void (*start)(struct cpufreq_policy *); +}; + +struct dbs_data { + struct gov_attr_set attr_set; + struct dbs_governor *gov; + void *tuners; + unsigned int ignore_nice_load; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int io_is_busy; +}; + +struct policy_dbs_info { + struct cpufreq_policy *policy; + struct mutex update_mutex; + u64 last_sample_time; + s64 sample_delay_ns; + atomic_t work_count; + struct irq_work irq_work; + struct work_struct work; + struct dbs_data *dbs_data; + struct list_head list; + unsigned int rate_mult; + unsigned int idle_periods; + bool is_shared; + bool work_in_progress; +}; + +enum { + OD_NORMAL_SAMPLE = 0, + OD_SUB_SAMPLE = 1, +}; + +struct od_policy_dbs_info { + struct policy_dbs_info policy_dbs; + unsigned int freq_lo; + unsigned int freq_lo_delay_us; + unsigned int freq_hi_delay_us; + unsigned int sample_type : 1; +}; + +struct od_dbs_tuners { + unsigned int powersave_bias; +}; + +struct cpu_dbs_info { + u64 prev_cpu_idle; + u64 prev_update_time; + u64 prev_cpu_nice; + unsigned int prev_load; + struct update_util_data update_util; + struct policy_dbs_info *policy_dbs; +}; + +enum { + UNDEFINED_CAPABLE = 0, + SYSTEM_INTEL_MSR_CAPABLE = 1, + SYSTEM_AMD_MSR_CAPABLE = 2, + SYSTEM_IO_CAPABLE = 3, +}; + +struct acpi_cpufreq_data { + unsigned int resume; + unsigned int cpu_feature; + unsigned int acpi_perf_cpu; + cpumask_var_t freqdomain_cpus; + void (*cpu_freq_write)(struct acpi_pct_register *, u32); + u32 (*cpu_freq_read)(struct acpi_pct_register *); +}; + +struct drv_cmd { + struct acpi_pct_register *reg; + u32 val; + union { + void (*write)(struct acpi_pct_register *, u32); + u32 (*read)(struct acpi_pct_register *); + } func; +}; + +typedef int 
(*cppc_mode_transition_fn)(int); + +enum amd_pstate_mode { + AMD_PSTATE_UNDEFINED = 0, + AMD_PSTATE_DISABLE = 1, + AMD_PSTATE_PASSIVE = 2, + AMD_PSTATE_ACTIVE = 3, + AMD_PSTATE_GUIDED = 4, + AMD_PSTATE_MAX = 5, +}; + +enum acpi_preferred_pm_profiles { + PM_UNSPECIFIED = 0, + PM_DESKTOP = 1, + PM_MOBILE = 2, + PM_WORKSTATION = 3, + PM_ENTERPRISE_SERVER = 4, + PM_SOHO_SERVER = 5, + PM_APPLIANCE_PC = 6, + PM_PERFORMANCE_SERVER = 7, + PM_TABLET = 8, + NR_PM_PROFILES = 9, +}; + +enum energy_perf_value_index { + EPP_INDEX_DEFAULT = 0, + EPP_INDEX_PERFORMANCE = 1, + EPP_INDEX_BALANCE_PERFORMANCE = 2, + EPP_INDEX_BALANCE_POWERSAVE = 3, + EPP_INDEX_POWERSAVE = 4, +}; + +struct amd_aperf_mperf { + u64 aperf; + u64 mperf; + u64 tsc; +}; + +struct amd_cpudata { + int cpu; + struct freq_qos_request req[2]; + u64 cppc_req_cached; + u32 highest_perf; + u32 nominal_perf; + u32 lowest_nonlinear_perf; + u32 lowest_perf; + u32 min_limit_perf; + u32 max_limit_perf; + u32 min_limit_freq; + u32 max_limit_freq; + u32 max_freq; + u32 min_freq; + u32 nominal_freq; + u32 lowest_nonlinear_freq; + struct amd_aperf_mperf cur; + struct amd_aperf_mperf prev; + u64 freq; + bool boost_supported; + s16 epp_policy; + s16 epp_cached; + u32 policy; + u64 cppc_cap1_cached; + bool suspended; +}; + +typedef void (*btf_trace_amd_pstate_perf)(void *, unsigned long, unsigned long, unsigned long, + u64, u64, u64, u64, unsigned int, bool, bool); + +struct trace_event_raw_amd_pstate_perf { + struct trace_entry ent; + unsigned long min_perf; + unsigned long target_perf; + unsigned long capacity; + unsigned long long freq; + unsigned long long mperf; + unsigned long long aperf; + unsigned long long tsc; + unsigned int cpu_id; + bool changed; + bool fast_switch; + char __data[0]; +}; + +struct trace_event_data_offsets_amd_pstate_perf {}; + +struct pstate_data { + int current_pstate; + int min_pstate; + int max_pstate; + int max_pstate_physical; + int perf_ctl_scaling; + int scaling; + int turbo_pstate; + unsigned int min_freq; + unsigned int max_freq; + unsigned int turbo_freq; +}; + +struct vid_data { + int min; + int max; + int turbo; + int32_t ratio; +}; + +struct sample { + int32_t core_avg_perf; + int32_t busy_scaled; + u64 aperf; + u64 mperf; + u64 tsc; + u64 time; +}; + +struct cpudata { + int cpu; + unsigned int policy; + struct update_util_data update_util; + bool update_util_set; + struct pstate_data pstate; + struct vid_data vid; + u64 last_update; + u64 last_sample_time; + u64 aperf_mperf_shift; + u64 prev_aperf; + u64 prev_mperf; + u64 prev_tsc; + u64 prev_cummulative_iowait; + struct sample sample; + int32_t min_perf_ratio; + int32_t max_perf_ratio; + struct acpi_processor_performance acpi_perf_data; + bool valid_pss_table; + unsigned int iowait_boost; + s16 epp_powersave; + s16 epp_policy; + s16 epp_default; + s16 epp_cached; + u64 hwp_req_cached; + u64 hwp_cap_cached; + u64 last_io_update; + unsigned int sched_flags; + u32 hwp_boost_min; + bool suspended; + struct delayed_work hwp_notify_work; +}; + +struct pstate_funcs { + int (*get_max)(int); + int (*get_max_physical)(int); + int (*get_min)(int); + int (*get_turbo)(int); + int (*get_scaling)(); + int (*get_cpu_scaling)(int); + int (*get_aperf_mperf_shift)(); + u64 (*get_val)(struct cpudata *, int); + void (*get_vid)(struct cpudata *); +}; + +struct global_params { + bool no_turbo; + bool turbo_disabled; + bool turbo_disabled_mf; + int max_perf_pct; + int min_perf_pct; +}; + +enum { + PSS = 0, + PPC = 1, +}; + +struct cpuidle_governor { + char name[16]; + struct 
list_head governor_list; + unsigned int rating; + int (*enable)(struct cpuidle_driver *, struct cpuidle_device *); + void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); + int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); + void (*reflect)(struct cpuidle_device *, int); +}; + +struct cpuidle_state_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); + ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, + const char *, size_t); +}; + +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; + struct completion kobj_unregister; + struct kobject kobj; + struct cpuidle_device *device; +}; + +struct cpuidle_device_kobj { + struct cpuidle_device *dev; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_device *, char *); + ssize_t (*store)(struct cpuidle_device *, const char *, size_t); +}; + +struct menu_device { + int needs_update; + int tick_wakeup; + u64 next_timer_ns; + unsigned int bucket; + unsigned int correction_factor[12]; + unsigned int intervals[8]; + int interval_ptr; +}; + +enum led_default_state { + LEDS_DEFSTATE_OFF = 0, + LEDS_DEFSTATE_ON = 1, + LEDS_DEFSTATE_KEEP = 2, +}; + +struct led_properties { + u32 color; + bool color_present; + const char *function; + u32 func_enum; + bool func_enum_present; + const char *label; +}; + +struct led_lookup_data { + struct list_head list; + const char *provider; + const char *dev_id; + const char *con_id; +}; + +struct dmi_memdev_info { + const char *device; + const char *bank; + u64 size; + u16 handle; + u8 type; +}; + +struct dmi_device_attribute { + struct device_attribute dev_attr; + int field; +}; + +struct mafield { + const char *prefix; + int field; +}; + +struct firmware_map_entry; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head list; + struct kobject kobj; +}; + +struct simplefb_platform_data { + u32 width; + u32 height; + u32 stride; + const char *format; +}; + +struct acpi_table_bgrt { + struct acpi_table_header header; + u16 version; + u8 status; + u8 image_type; + u64 image_address; + u32 image_offset_x; + u32 image_offset_y; +}; + +struct bmp_header { + u16 id; + u32 size; +} __attribute__((packed)); + +typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); + +typedef efi_status_t efi_set_time_t(efi_time_t *); + +typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); + +typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); + +typedef efi_status_t +efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, unsigned long *, void *); + +typedef efi_status_t efi_get_next_variable_t(unsigned long *, efi_char16_t *, efi_guid_t *); + +typedef efi_status_t +efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, unsigned long, void *); + +typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *); + +typedef efi_status_t +efi_update_capsule_t(efi_capsule_header_t **, unsigned long, unsigned long); + +typedef efi_status_t +efi_query_capsule_caps_t(efi_capsule_header_t **, unsigned long, u64 *, int *); + +typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); + +typedef void efi_reset_system_t(int, efi_status_t, unsigned long, efi_char16_t *); + +struct 
efi { + const efi_runtime_services_t *runtime; + unsigned int runtime_version; + unsigned int runtime_supported_mask; + unsigned long acpi; + unsigned long acpi20; + unsigned long smbios; + unsigned long smbios3; + unsigned long esrt; + unsigned long tpm_log; + unsigned long tpm_final_log; + unsigned long mokvar_table; + unsigned long coco_secret; + unsigned long unaccepted; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + struct efi_memory_map memmap; + unsigned long flags; +}; + +struct linux_efi_memreserve { + int size; + atomic_t count; + phys_addr_t next; + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; +}; + +typedef efi_status_t efi_query_variable_store_t(u32, unsigned long, bool); + +struct efivar_operations { + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_store_t *query_variable_store; + efi_query_variable_info_t *query_variable_info; +}; + +struct efivars { + struct kset *kset; + const struct efivar_operations *ops; +}; + +typedef struct { + u16 version; + u16 length; + u32 runtime_services_supported; +} efi_rt_properties_table_t; + +struct linux_efi_initrd { + unsigned long base; + unsigned long size; +}; + +struct efi_unaccepted_memory { + u32 version; + u32 unit_size; + u64 phys_base; + u64 size; + unsigned long bitmap[0]; +}; + +struct linux_efi_random_seed { + u32 size; + u8 bits[0]; +}; + +typedef struct { + u32 version; + u32 num_entries; + u32 desc_size; + u32 flags; + efi_memory_desc_t entry[0]; +} efi_memory_attributes_table_t; + +enum tcpa_event_types { + PREBOOT = 0, + POST_CODE = 1, + UNUSED = 2, + NO_ACTION = 3, + SEPARATOR = 4, + ACTION = 5, + EVENT_TAG = 6, + SCRTM_CONTENTS = 7, + SCRTM_VERSION = 8, + CPU_MICROCODE = 9, + PLATFORM_CONFIG_FLAGS = 10, + TABLE_OF_DEVICES = 11, + COMPACT_HASH = 12, + IPL = 13, + IPL_PARTITION_DATA = 14, + NONHOST_CODE = 15, + NONHOST_CONFIG = 16, + NONHOST_INFO = 17, +}; + +struct tpm_digest { + u16 alg_id; + u8 digest[64]; +}; + +struct tcg_pcr_event2_head { + u32 pcr_idx; + u32 event_type; + u32 count; + struct tpm_digest digests[0]; +}; + +struct tcg_efi_specid_event_algs { + u16 alg_id; + u16 digest_size; +}; + +struct tcg_efi_specid_event_head { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintnsize; + u32 num_algs; + struct tcg_efi_specid_event_algs digest_sizes[0]; +}; + +struct tcg_event_field { + u32 event_size; + u8 event[0]; +}; + +struct tcg_pcr_event { + u32 pcr_idx; + u32 event_type; + u8 digest[20]; + u32 event_size; + u8 event[0]; +}; + +struct linux_efi_tpm_eventlog { + u32 size; + u32 final_events_preboot_size; + u8 version; + u8 log[0]; +}; + +struct efi_tcg2_final_events_table { + u64 version; + u64 nr_events; + u8 events[0]; +}; + +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + 
u8 entries[0]; +}; + +struct esre_entry; + +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *, char *); + ssize_t (*store)(struct esre_entry *, const char *, size_t); +}; + +struct efi_system_resource_entry_v1; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + struct kobject kobj; + struct list_head list; +}; + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +struct cper_mem_err_compact { + u64 validation_bits; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_pos; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u16 rank; + u16 mem_array_handle; + u16 mem_dev_handle; + u8 extended; +} __attribute__((packed)); + +struct cper_sec_proc_generic { + u64 validation_bits; + u8 proc_type; + u8 proc_isa; + u8 proc_error_type; + u8 operation; + u8 flags; + u8 level; + u16 reserved; + u64 cpu_version; + char cpu_brand[128]; + u64 proc_id; + u64 target_addr; + u64 requestor_id; + u64 responder_id; + u64 ip; +}; + +struct cper_sec_proc_ia { + u64 validation_bits; + u64 lapic_id; + u8 cpuid[48]; +}; + +struct cper_sec_fw_err_rec_ref { + u8 record_type; + u8 revision; + u8 reserved[6]; + u64 record_identifier; + guid_t record_identifier_guid; +}; + +struct cper_sec_prot_err { + u64 valid_bits; + u8 agent_type; + u8 reserved[7]; + union { + u64 rcrb_base_addr; + struct { + u8 function; + u8 device; + u8 bus; + u16 segment; + u8 reserved_1[3]; + } __attribute__((packed)); + } agent_addr; + struct { + u16 vendor_id; + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_id; + u8 class_code[2]; + u16 slot; + u8 reserved_1[4]; + } device_id; + struct { + u32 lower_dw; + u32 upper_dw; + } dev_serial_num; + u8 capability[60]; + u16 dvsec_len; + u16 err_len; + u8 reserved_2[4]; +} __attribute__((packed)); + +enum { + RCD = 0, + RCH_DP = 1, + DEVICE = 2, + LD = 3, + FMLD = 4, + RP = 5, + DSP = 6, + USP = 7, +}; + +struct cxl_ras_capability_regs { + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + u32 header_log[16]; +}; + +union efi_rts_args; + +struct efi_runtime_work { + union efi_rts_args *args; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; + const void *caller; +}; + +union efi_rts_args { + struct { + efi_time_t *time; + efi_time_cap_t *capabilities; + } GET_TIME; + struct { + efi_time_t *time; + } SET_TIME; + struct { + efi_bool_t *enabled; + efi_bool_t *pending; + efi_time_t *time; + } GET_WAKEUP_TIME; + struct { + efi_bool_t enable; + efi_time_t *time; + } SET_WAKEUP_TIME; + struct { + efi_char16_t *name; + efi_guid_t *vendor; + u32 *attr; + unsigned long *data_size; + void *data; + } GET_VARIABLE; + struct { + unsigned long *name_size; + efi_char16_t *name; + efi_guid_t *vendor; + } GET_NEXT_VARIABLE; + struct { + efi_char16_t *name; + efi_guid_t *vendor; + u32 attr; + unsigned long data_size; + void *data; + } SET_VARIABLE; + struct { + u32 attr; + u64 *storage_space; + u64 *remaining_space; + u64 *max_variable_size; + } QUERY_VARIABLE_INFO; + struct { + u32 *high_count; + } GET_NEXT_HIGH_MONO_COUNT; + struct { + efi_capsule_header_t **capsules; + unsigned long count; + unsigned long sg_list; + } UPDATE_CAPSULE; + struct { + efi_capsule_header_t **capsules; + unsigned long 
count; + u64 *max_size; + int *reset_type; + } QUERY_CAPSULE_CAPS; + struct { + efi_status_t (*acpi_prm_handler)(u64, void *); + u64 param_buffer_addr; + void *context; + } ACPI_PRM_HANDLER; +}; + +struct efifb_dmi_info { + char *optname; + unsigned long base; + int stride; + int width; + int height; + int flags; +}; + +enum { + M_I17 = 0, + M_I20 = 1, + M_I20_SR = 2, + M_I24 = 3, + M_I24_8_1 = 4, + M_I24_10_1 = 5, + M_I27_11_1 = 6, + M_MINI = 7, + M_MINI_3_1 = 8, + M_MINI_4_1 = 9, + M_MB = 10, + M_MB_2 = 11, + M_MB_3 = 12, + M_MB_5_1 = 13, + M_MB_6_1 = 14, + M_MB_7_1 = 15, + M_MB_SR = 16, + M_MBA = 17, + M_MBA_3 = 18, + M_MBP = 19, + M_MBP_2 = 20, + M_MBP_2_2 = 21, + M_MBP_SR = 22, + M_MBP_4 = 23, + M_MBP_5_1 = 24, + M_MBP_5_2 = 25, + M_MBP_5_3 = 26, + M_MBP_6_1 = 27, + M_MBP_6_2 = 28, + M_MBP_7_1 = 29, + M_MBP_8_2 = 30, + M_UNKNOWN = 31, +}; + +enum { + OVERRIDE_NONE = 0, + OVERRIDE_BASE = 1, + OVERRIDE_STRIDE = 2, + OVERRIDE_HEIGHT = 4, + OVERRIDE_WIDTH = 8, +}; + +enum err_types { + ERR_TYPE_CACHE = 0, + ERR_TYPE_TLB = 1, + ERR_TYPE_BUS = 2, + ERR_TYPE_MS = 3, + N_ERR_TYPES = 4, +}; + +struct cper_ia_err_info { + guid_t err_type; + u64 validation_bits; + u64 check_info; + u64 target_id; + u64 requestor_id; + u64 responder_id; + u64 ip; +}; + +struct accept_range { + struct list_head list; + unsigned long start; + unsigned long end; +}; + +enum ccp_memtype { + CCP_MEMTYPE_SYSTEM = 0, + CCP_MEMTYPE_SB = 1, + CCP_MEMTYPE_LOCAL = 2, + CCP_MEMTYPE__LAST = 3, +}; + +enum ccp_aes_type { + CCP_AES_TYPE_128 = 0, + CCP_AES_TYPE_192 = 1, + CCP_AES_TYPE_256 = 2, + CCP_AES_TYPE__LAST = 3, +}; + +enum ccp_aes_mode { + CCP_AES_MODE_ECB = 0, + CCP_AES_MODE_CBC = 1, + CCP_AES_MODE_OFB = 2, + CCP_AES_MODE_CFB = 3, + CCP_AES_MODE_CTR = 4, + CCP_AES_MODE_CMAC = 5, + CCP_AES_MODE_GHASH = 6, + CCP_AES_MODE_GCTR = 7, + CCP_AES_MODE_GCM = 8, + CCP_AES_MODE_GMAC = 9, + CCP_AES_MODE__LAST = 10, +}; + +enum ccp_aes_action { + CCP_AES_ACTION_DECRYPT = 0, + CCP_AES_ACTION_ENCRYPT = 1, + CCP_AES_ACTION__LAST = 2, +}; + +enum ccp_xts_aes_unit_size { + CCP_XTS_AES_UNIT_SIZE_16 = 0, + CCP_XTS_AES_UNIT_SIZE_512 = 1, + CCP_XTS_AES_UNIT_SIZE_1024 = 2, + CCP_XTS_AES_UNIT_SIZE_2048 = 3, + CCP_XTS_AES_UNIT_SIZE_4096 = 4, + CCP_XTS_AES_UNIT_SIZE__LAST = 5, +}; + +enum ccp_des3_type { + CCP_DES3_TYPE_168 = 1, + CCP_DES3_TYPE__LAST = 2, +}; + +enum ccp_des3_mode { + CCP_DES3_MODE_ECB = 0, + CCP_DES3_MODE_CBC = 1, + CCP_DES3_MODE_CFB = 2, + CCP_DES3_MODE__LAST = 3, +}; + +enum ccp_des3_action { + CCP_DES3_ACTION_DECRYPT = 0, + CCP_DES3_ACTION_ENCRYPT = 1, + CCP_DES3_ACTION__LAST = 2, +}; + +enum ccp_sha_type { + CCP_SHA_TYPE_1 = 1, + CCP_SHA_TYPE_224 = 2, + CCP_SHA_TYPE_256 = 3, + CCP_SHA_TYPE_384 = 4, + CCP_SHA_TYPE_512 = 5, + CCP_SHA_TYPE__LAST = 6, +}; + +enum ccp_passthru_bitwise { + CCP_PASSTHRU_BITWISE_NOOP = 0, + CCP_PASSTHRU_BITWISE_AND = 1, + CCP_PASSTHRU_BITWISE_OR = 2, + CCP_PASSTHRU_BITWISE_XOR = 3, + CCP_PASSTHRU_BITWISE_MASK = 4, + CCP_PASSTHRU_BITWISE__LAST = 5, +}; + +enum ccp_passthru_byteswap { + CCP_PASSTHRU_BYTESWAP_NOOP = 0, + CCP_PASSTHRU_BYTESWAP_32BIT = 1, + CCP_PASSTHRU_BYTESWAP_256BIT = 2, + CCP_PASSTHRU_BYTESWAP__LAST = 3, +}; + +enum ccp_ecc_function { + CCP_ECC_FUNCTION_MMUL_384BIT = 0, + CCP_ECC_FUNCTION_MADD_384BIT = 1, + CCP_ECC_FUNCTION_MINV_384BIT = 2, + CCP_ECC_FUNCTION_PADD_384BIT = 3, + CCP_ECC_FUNCTION_PMUL_384BIT = 4, + CCP_ECC_FUNCTION_PDBL_384BIT = 5, +}; + +struct sp_dev_vdata; + +struct sp_device { + struct list_head entry; + struct device *dev; + struct sp_dev_vdata *dev_vdata; + 
unsigned int ord; + char name[32]; + void *dev_specific; + void *io_map; + unsigned int axcache; + struct sp_device *(*get_psp_master_device)(); + void (*set_psp_master_device)(struct sp_device *); + void (*clear_psp_master_device)(struct sp_device *); + bool irq_registered; + bool use_tasklet; + unsigned int ccp_irq; + irq_handler_t ccp_irq_handler; + void *ccp_irq_data; + unsigned int psp_irq; + irq_handler_t psp_irq_handler; + void *psp_irq_data; + void *ccp_data; + void *psp_data; +}; + +struct ccp_vdata; + +struct psp_vdata; + +struct sp_dev_vdata { + const unsigned int bar; + const struct ccp_vdata *ccp_vdata; + const struct psp_vdata *psp_vdata; +}; + +struct ccp_device; + +struct ccp_actions; + +struct ccp_vdata { + const unsigned int version; + const unsigned int dma_chan_attr; + void (*setup)(struct ccp_device *); + const struct ccp_actions *perform; + const unsigned int offset; + const unsigned int rsamax; +}; + +struct ccp5_desc; + +struct ccp_cmd_queue { + struct ccp_device *ccp; + u32 id; + struct dma_pool *dma_pool; + struct ccp5_desc *qbase; + long : 64; + long : 64; + long : 64; + long : 64; + struct mutex q_mutex; + unsigned int qidx; + unsigned int qsize; + dma_addr_t qbase_dma; + dma_addr_t qdma_tail; + u32 sb_key; + u32 sb_ctx; + unsigned long lsbmask[1]; + int lsb; + unsigned long lsbmap[1]; + struct task_struct *kthread; + unsigned int active; + unsigned int suspended; + unsigned int free_slots; + u32 int_ok; + u32 int_err; + void *reg_control; + void *reg_tail_lo; + void *reg_head_lo; + void *reg_int_enable; + void *reg_interrupt_status; + void *reg_status; + void *reg_int_status; + void *reg_dma_status; + void *reg_dma_read_status; + void *reg_dma_write_status; + u32 qcontrol; + u32 int_status; + u32 q_status; + u32 q_int_status; + u32 cmd_error; + wait_queue_head_t int_queue; + unsigned int int_rcvd; + unsigned long total_ops; + unsigned long total_aes_ops; + unsigned long total_xts_aes_ops; + unsigned long total_3des_ops; + unsigned long total_sha_ops; + unsigned long total_rsa_ops; + unsigned long total_pt_ops; + unsigned long total_ecc_ops; +}; + +struct ccp_dma_chan; + +struct ccp_device { + struct list_head entry; + struct ccp_vdata *vdata; + unsigned int ord; + char name[16]; + char rngname[16]; + struct device *dev; + struct sp_device *sp; + void *dev_specific; + unsigned int qim; + unsigned int irq; + bool use_tasklet; + struct tasklet_struct irq_tasklet; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct mutex req_mutex; + void *io_regs; + long : 64; + long : 64; + long : 64; + spinlock_t cmd_lock; + unsigned int cmd_count; + struct list_head cmd; + struct list_head backlog; + long : 64; + long : 64; + long : 64; + struct ccp_cmd_queue cmd_q[5]; + unsigned int cmd_q_count; + unsigned int max_q_count; + struct hwrng hwrng; + unsigned int hwrng_retries; + struct dma_device dma_dev; + struct ccp_dma_chan *ccp_dma_chan; + struct kmem_cache *dma_cmd_cache; + struct kmem_cache *dma_desc_cache; + atomic_t current_id; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct mutex sb_mutex; + unsigned long sb[1]; + wait_queue_head_t sb_queue; + unsigned int sb_avail; + unsigned int sb_count; + u32 sb_start; + unsigned long lsbmap[2]; + unsigned int suspending; + wait_queue_head_t suspend_queue; + unsigned int axcache; + unsigned long total_interrupts; + struct dentry *debugfs_instance; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct dword0 { + unsigned int 
soc : 1; + unsigned int ioc : 1; + unsigned int rsvd1 : 1; + unsigned int init : 1; + unsigned int eom : 1; + unsigned int function : 15; + unsigned int engine : 4; + unsigned int prot : 1; + unsigned int rsvd2 : 7; +}; + +struct dword3 { + unsigned int src_hi : 16; + unsigned int src_mem : 2; + unsigned int lsb_cxt_id : 8; + unsigned int rsvd1 : 5; + unsigned int fixed : 1; +}; + +union dword4 { + u32 dst_lo; + u32 sha_len_lo; +}; + +union dword5 { + struct { + unsigned int dst_hi : 16; + unsigned int dst_mem : 2; + unsigned int rsvd1 : 13; + unsigned int fixed : 1; + } fields; + u32 sha_len_hi; +}; + +struct dword7 { + unsigned int key_hi : 16; + unsigned int key_mem : 2; + unsigned int rsvd1 : 14; +}; + +struct ccp5_desc { + struct dword0 dw0; + u32 length; + u32 src_lo; + struct dword3 dw3; + union dword4 dw4; + union dword5 dw5; + u32 key_lo; + struct dword7 dw7; +}; + +struct ccp_dma_chan { + struct ccp_device *ccp; + spinlock_t lock; + struct list_head created; + struct list_head pending; + struct list_head active; + struct list_head complete; + struct tasklet_struct cleanup_tasklet; + enum dma_status status; + struct dma_chan___2 dma_chan; +}; + +struct ccp_op; + +struct ccp_actions { + int (*aes)(struct ccp_op *); + int (*xts_aes)(struct ccp_op *); + int (*des3)(struct ccp_op *); + int (*sha)(struct ccp_op *); + int (*rsa)(struct ccp_op *); + int (*passthru)(struct ccp_op *); + int (*ecc)(struct ccp_op *); + u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); + void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); + unsigned int (*get_free_slots)(struct ccp_cmd_queue *); + int (*init)(struct ccp_device *); + void (*destroy)(struct ccp_device *); + irqreturn_t (*irqhandler)(int, void *); +}; + +struct ccp_aes_op { + enum ccp_aes_type type; + enum ccp_aes_mode mode; + enum ccp_aes_action action; + unsigned int size; +}; + +struct ccp_xts_aes_op { + enum ccp_aes_type type; + enum ccp_aes_action action; + enum ccp_xts_aes_unit_size unit_size; +}; + +struct ccp_des3_op { + enum ccp_des3_type type; + enum ccp_des3_mode mode; + enum ccp_des3_action action; +}; + +struct ccp_sha_op { + enum ccp_sha_type type; + u64 msg_bits; +}; + +struct ccp_rsa_op { + u32 mod_size; + u32 input_len; +}; + +struct ccp_passthru_op { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; +}; + +struct ccp_ecc_op { + enum ccp_ecc_function function; +}; + +struct ccp_dma_info { + dma_addr_t address; + unsigned int offset; + unsigned int length; + enum dma_data_direction dir; +} __attribute__((packed)); + +struct ccp_mem { + enum ccp_memtype type; + union { + struct ccp_dma_info dma; + u32 sb; + } u; +}; + +struct ccp_op { + struct ccp_cmd_queue *cmd_q; + u32 jobid; + u32 ioc; + u32 soc; + u32 sb_key; + u32 sb_ctx; + u32 init; + u32 eom; + struct ccp_mem src; + struct ccp_mem dst; + struct ccp_mem exp; + union { + struct ccp_aes_op aes; + struct ccp_xts_aes_op xts; + struct ccp_des3_op des3; + struct ccp_sha_op sha; + struct ccp_rsa_op rsa; + struct ccp_passthru_op passthru; + struct ccp_ecc_op ecc; + } u; +}; + +struct sev_vdata; + +struct tee_vdata; + +struct platform_access_vdata; + +struct psp_vdata { + const struct sev_vdata *sev; + const struct tee_vdata *tee; + const struct platform_access_vdata *platform_access; + const unsigned int cmdresp_reg; + const unsigned int cmdbuff_addr_lo_reg; + const unsigned int cmdbuff_addr_hi_reg; + const unsigned int feature_reg; + const unsigned int inten_reg; + const unsigned int intsts_reg; + const unsigned int 
bootloader_info_reg; + const unsigned int platform_features; +}; + +struct sev_vdata { + const unsigned int cmdresp_reg; + const unsigned int cmdbuff_addr_lo_reg; + const unsigned int cmdbuff_addr_hi_reg; +}; + +struct tee_vdata { + const unsigned int cmdresp_reg; + const unsigned int cmdbuff_addr_lo_reg; + const unsigned int cmdbuff_addr_hi_reg; + const unsigned int ring_wptr_reg; + const unsigned int ring_rptr_reg; + const unsigned int info_reg; +}; + +struct platform_access_vdata { + const unsigned int cmdresp_reg; + const unsigned int cmdbuff_addr_lo_reg; + const unsigned int cmdbuff_addr_hi_reg; + const unsigned int doorbell_button_reg; + const unsigned int doorbell_cmd_reg; +}; + +struct sp_platform { + int coherent; + unsigned int irq_count; +}; + +enum ccp_engine { + CCP_ENGINE_AES = 0, + CCP_ENGINE_XTS_AES_128 = 1, + CCP_ENGINE_DES3 = 2, + CCP_ENGINE_SHA = 3, + CCP_ENGINE_RSA = 4, + CCP_ENGINE_PASSTHRU = 5, + CCP_ENGINE_ZLIB_DECOMPRESS = 6, + CCP_ENGINE_ECC = 7, + CCP_ENGINE__LAST = 8, +}; + +struct ccp_cmd; + +struct ccp_tasklet_data { + struct completion completion; + struct ccp_cmd *cmd; +}; + +struct ccp_aes_engine { + enum ccp_aes_type type; + enum ccp_aes_mode mode; + enum ccp_aes_action action; + u32 authsize; + struct scatterlist *key; + u32 key_len; + struct scatterlist *iv; + u32 iv_len; + struct scatterlist *src; + struct scatterlist *dst; + u64 src_len; + u32 cmac_final; + struct scatterlist *cmac_key; + u32 cmac_key_len; + u32 aad_len; +}; + +struct ccp_xts_aes_engine { + enum ccp_aes_type type; + enum ccp_aes_action action; + enum ccp_xts_aes_unit_size unit_size; + struct scatterlist *key; + u32 key_len; + struct scatterlist *iv; + u32 iv_len; + struct scatterlist *src; + struct scatterlist *dst; + u64 src_len; + u32 final; +}; + +struct ccp_des3_engine { + enum ccp_des3_type type; + enum ccp_des3_mode mode; + enum ccp_des3_action action; + struct scatterlist *key; + u32 key_len; + struct scatterlist *iv; + u32 iv_len; + struct scatterlist *src; + struct scatterlist *dst; + u64 src_len; +}; + +struct ccp_sha_engine { + enum ccp_sha_type type; + struct scatterlist *ctx; + u32 ctx_len; + struct scatterlist *src; + u64 src_len; + struct scatterlist *opad; + u32 opad_len; + u32 first; + u32 final; + u64 msg_bits; +}; + +struct ccp_rsa_engine { + u32 key_size; + struct scatterlist *exp; + u32 exp_len; + struct scatterlist *mod; + u32 mod_len; + struct scatterlist *src; + struct scatterlist *dst; + u32 src_len; +}; + +struct ccp_passthru_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + struct scatterlist *mask; + u32 mask_len; + struct scatterlist *src; + struct scatterlist *dst; + u64 src_len; + u32 final; +}; + +struct ccp_passthru_nomap_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + dma_addr_t mask; + u32 mask_len; + dma_addr_t src_dma; + dma_addr_t dst_dma; + u64 src_len; + u32 final; +}; + +struct ccp_ecc_modular_math { + struct scatterlist *operand_1; + unsigned int operand_1_len; + struct scatterlist *operand_2; + unsigned int operand_2_len; + struct scatterlist *result; + unsigned int result_len; +}; + +struct ccp_ecc_point { + struct scatterlist *x; + unsigned int x_len; + struct scatterlist *y; + unsigned int y_len; +}; + +struct ccp_ecc_point_math { + struct ccp_ecc_point point_1; + struct ccp_ecc_point point_2; + struct scatterlist *domain_a; + unsigned int domain_a_len; + struct scatterlist *scalar; + unsigned int scalar_len; + struct ccp_ecc_point result; +}; + +struct 
ccp_ecc_engine { + enum ccp_ecc_function function; + struct scatterlist *mod; + u32 mod_len; + union { + struct ccp_ecc_modular_math mm; + struct ccp_ecc_point_math pm; + } u; + u16 ecc_result; +}; + +struct ccp_cmd { + struct list_head entry; + struct work_struct work; + struct ccp_device *ccp; + int ret; + u32 flags; + enum ccp_engine engine; + u32 engine_error; + union { + struct ccp_aes_engine aes; + struct ccp_xts_aes_engine xts; + struct ccp_des3_engine des3; + struct ccp_sha_engine sha; + struct ccp_rsa_engine rsa; + struct ccp_passthru_engine passthru; + struct ccp_passthru_nomap_engine passthru_nomap; + struct ccp_ecc_engine ecc; + } u; + void (*callback)(void *, int); + void *data; +}; + +struct ccp_dm_workarea { + struct device *dev; + struct dma_pool *dma_pool; + u8 *address; + struct ccp_dma_info dma; + unsigned int length; +}; + +struct ccp_sg_workarea { + struct scatterlist *sg; + int nents; + unsigned int sg_used; + struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; + struct device *dma_dev; + unsigned int dma_count; + enum dma_data_direction dma_dir; + u64 bytes_left; +}; + +struct ccp_data { + struct ccp_sg_workarea sg_wa; + struct ccp_dm_workarea dm_wa; +}; + +union ccp_function { + struct { + u16 size : 7; + u16 encrypt : 1; + u16 mode : 5; + u16 type : 2; + } aes; + struct { + u16 size : 7; + u16 encrypt : 1; + u16 rsvd : 5; + u16 type : 2; + } aes_xts; + struct { + u16 size : 7; + u16 encrypt : 1; + u16 mode : 5; + u16 type : 2; + } des3; + struct { + u16 rsvd1 : 10; + u16 type : 4; + u16 rsvd2 : 1; + } sha; + struct { + u16 mode : 3; + u16 size : 12; + } rsa; + struct { + u16 byteswap : 2; + u16 bitwise : 3; + u16 reflect : 2; + u16 rsvd : 8; + } pt; + struct { + u16 rsvd : 13; + } zlib; + struct { + u16 size : 10; + u16 type : 2; + u16 mode : 3; + } ecc; + u16 raw; +}; + +struct ccp_dma_desc { + struct list_head entry; + struct ccp_device *ccp; + struct list_head pending; + struct list_head active; + enum dma_status status; + struct dma_async_tx_descriptor tx_desc; + size_t len; +}; + +struct ccp_dma_cmd { + struct list_head entry; + struct ccp_cmd ccp_cmd; +}; + +struct sp_pci { + int msix_count; + struct msix_entry msix_entry[2]; +}; + +typedef void (*psp_irq_handler_t)(int, void *, unsigned int); + +struct psp_device { + struct list_head entry; + struct psp_vdata *vdata; + char name[16]; + struct device *dev; + struct sp_device *sp; + void *io_regs; + struct mutex mailbox_mutex; + psp_irq_handler_t sev_irq_handler; + void *sev_irq_data; + void *sev_data; + void *tee_data; + void *platform_access_data; + void *dbc_data; + unsigned int capability; +}; + +enum psp_cmd { + PSP_CMD_TEE_RING_INIT = 1, + PSP_CMD_TEE_RING_DESTROY = 2, + PSP_CMD_TEE_EXTENDED_CMD = 14, + PSP_CMD_MAX = 15, +}; + +struct psp_ext_req_buffer_hdr { + u32 payload_size; + u32 sub_cmd_id; + u32 status; +}; + +struct psp_ext_request { + struct psp_ext_req_buffer_hdr header; + void *buf; +} __attribute__((packed)); + +struct sev_misc_dev { + struct kref refcount; + struct miscdevice misc; +}; + +enum sev_cmd { + SEV_CMD_INIT = 1, + SEV_CMD_SHUTDOWN = 2, + SEV_CMD_FACTORY_RESET = 3, + SEV_CMD_PLATFORM_STATUS = 4, + SEV_CMD_PEK_GEN = 5, + SEV_CMD_PEK_CSR = 6, + SEV_CMD_PEK_CERT_IMPORT = 7, + SEV_CMD_PDH_CERT_EXPORT = 8, + SEV_CMD_PDH_GEN = 9, + SEV_CMD_DF_FLUSH = 10, + SEV_CMD_DOWNLOAD_FIRMWARE = 11, + SEV_CMD_GET_ID = 12, + SEV_CMD_INIT_EX = 13, + SEV_CMD_DECOMMISSION = 32, + SEV_CMD_ACTIVATE = 33, + SEV_CMD_DEACTIVATE = 34, + SEV_CMD_GUEST_STATUS = 35, + SEV_CMD_LAUNCH_START = 48, + 
SEV_CMD_LAUNCH_UPDATE_DATA = 49, + SEV_CMD_LAUNCH_UPDATE_VMSA = 50, + SEV_CMD_LAUNCH_MEASURE = 51, + SEV_CMD_LAUNCH_UPDATE_SECRET = 52, + SEV_CMD_LAUNCH_FINISH = 53, + SEV_CMD_ATTESTATION_REPORT = 54, + SEV_CMD_SEND_START = 64, + SEV_CMD_SEND_UPDATE_DATA = 65, + SEV_CMD_SEND_UPDATE_VMSA = 66, + SEV_CMD_SEND_FINISH = 67, + SEV_CMD_SEND_CANCEL = 68, + SEV_CMD_RECEIVE_START = 80, + SEV_CMD_RECEIVE_UPDATE_DATA = 81, + SEV_CMD_RECEIVE_UPDATE_VMSA = 82, + SEV_CMD_RECEIVE_FINISH = 83, + SEV_CMD_DBG_DECRYPT = 96, + SEV_CMD_DBG_ENCRYPT = 97, + SEV_CMD_MAX = 98, +}; + +enum sev_state { + SEV_STATE_UNINIT = 0, + SEV_STATE_INIT = 1, + SEV_STATE_WORKING = 2, + SEV_STATE_MAX = 3, +}; + +enum { + SEV_FACTORY_RESET = 0, + SEV_PLATFORM_STATUS = 1, + SEV_PEK_GEN = 2, + SEV_PEK_CSR = 3, + SEV_PDH_GEN = 4, + SEV_PDH_CERT_EXPORT = 5, + SEV_PEK_CERT_IMPORT = 6, + SEV_GET_ID = 7, + SEV_GET_ID2 = 8, + SEV_MAX = 9, +}; + +struct sev_device { + struct device *dev; + struct psp_device *psp; + void *io_regs; + struct sev_vdata *vdata; + int state; + unsigned int int_rcvd; + wait_queue_head_t int_queue; + struct sev_misc_dev *misc; + u8 api_major; + u8 api_minor; + u8 build; + void *cmd_buf; +}; + +struct sev_data_init_ex { + u32 length; + u32 flags; + u64 tmr_address; + u32 tmr_len; + u32 reserved; + u64 nv_address; + u32 nv_len; +} __attribute__((packed)); + +struct sev_data_init { + u32 flags; + u32 reserved; + u64 tmr_address; + u32 tmr_len; +} __attribute__((packed)); + +struct sev_user_data_status { + __u8 api_major; + __u8 api_minor; + __u8 state; + __u32 flags; + __u8 build; + __u32 guest_count; +} __attribute__((packed)); + +struct sev_data_download_firmware { + u64 address; + u32 len; +} __attribute__((packed)); + +struct sev_data_deactivate { + u32 handle; +}; + +struct sev_data_activate { + u32 handle; + u32 asid; +}; + +struct sev_data_decommission { + u32 handle; +}; + +struct sev_issue_cmd { + __u32 cmd; + __u64 data; + __u32 error; +} __attribute__((packed)); + +struct sev_user_data_pek_csr { + __u64 address; + __u32 length; +} __attribute__((packed)); + +struct sev_data_pek_csr { + u64 address; + u32 len; +} __attribute__((packed)); + +struct sev_user_data_pek_cert_import { + __u64 pek_cert_address; + __u32 pek_cert_len; + __u64 oca_cert_address; + __u32 oca_cert_len; +} __attribute__((packed)); + +struct sev_data_pek_cert_import { + u64 pek_cert_address; + u32 pek_cert_len; + u32 reserved; + u64 oca_cert_address; + u32 oca_cert_len; +} __attribute__((packed)); + +struct sev_user_data_pdh_cert_export { + __u64 pdh_cert_address; + __u32 pdh_cert_len; + __u64 cert_chain_address; + __u32 cert_chain_len; +} __attribute__((packed)); + +struct sev_data_pdh_cert_export { + u64 pdh_cert_address; + u32 pdh_cert_len; + u32 reserved; + u64 cert_chain_address; + u32 cert_chain_len; +} __attribute__((packed)); + +struct sev_data_get_id { + u64 address; + u32 len; +} __attribute__((packed)); + +struct sev_user_data_get_id2 { + __u64 address; + __u32 length; +} __attribute__((packed)); + +enum tee_cmd_id { + TEE_CMD_ID_LOAD_TA = 1, + TEE_CMD_ID_UNLOAD_TA = 2, + TEE_CMD_ID_OPEN_SESSION = 3, + TEE_CMD_ID_CLOSE_SESSION = 4, + TEE_CMD_ID_INVOKE_CMD = 5, + TEE_CMD_ID_MAP_SHARED_MEM = 6, + TEE_CMD_ID_UNMAP_SHARED_MEM = 7, +}; + +enum cmd_resp_state { + CMD_RESPONSE_INVALID = 0, + CMD_WAITING_FOR_RESPONSE = 1, + CMD_RESPONSE_TIMEDOUT = 2, + CMD_RESPONSE_COPIED = 3, +}; + +enum tee_cmd_state { + TEE_CMD_STATE_INIT = 0, + TEE_CMD_STATE_PROCESS = 1, + TEE_CMD_STATE_COMPLETED = 2, +}; + +struct tee_ring_cmd { + u32 cmd_id; + 
u32 cmd_state; + u32 status; + u32 res0[1]; + u64 pdata; + u32 res1[2]; + u8 buf[988]; + u32 flag; +}; + +struct ring_buf_manager { + struct mutex mutex; + void *ring_start; + u32 ring_size; + phys_addr_t ring_pa; + u32 wptr; +}; + +struct psp_tee_device { + struct device *dev; + struct psp_device *psp; + void *io_regs; + struct tee_vdata *vdata; + struct ring_buf_manager rb_mgr; +}; + +struct tee_init_ring_cmd { + u32 low_addr; + u32 hi_addr; + u32 size; +}; + +enum psp_platform_access_msg { + PSP_CMD_NONE = 0, + PSP_I2C_REQ_BUS_CMD = 100, + PSP_DYNAMIC_BOOST_GET_NONCE = 101, + PSP_DYNAMIC_BOOST_SET_UID = 102, + PSP_DYNAMIC_BOOST_GET_PARAMETER = 103, + PSP_DYNAMIC_BOOST_SET_PARAMETER = 104, +}; + +struct psp_req_buffer_hdr { + u32 payload_size; + u32 status; +}; + +struct psp_request { + struct psp_req_buffer_hdr header; + void *buf; +}; + +struct psp_platform_access_device { + struct device *dev; + struct psp_device *psp; + struct platform_access_vdata *vdata; + struct mutex mailbox_mutex; + struct mutex doorbell_mutex; + void *platform_access_data; +}; + +struct error_map { + u32 psp; + int ret; +}; + +enum dbc_cmd_msg { + PARAM_GET_FMAX_CAP = 3, + PARAM_SET_FMAX_CAP = 4, + PARAM_GET_PWR_CAP = 5, + PARAM_SET_PWR_CAP = 6, + PARAM_GET_GFX_MODE = 7, + PARAM_SET_GFX_MODE = 8, + PARAM_GET_CURR_TEMP = 9, + PARAM_GET_FMAX_MAX = 10, + PARAM_GET_FMAX_MIN = 11, + PARAM_GET_SOC_PWR_MAX = 12, + PARAM_GET_SOC_PWR_MIN = 13, + PARAM_GET_SOC_PWR_CUR = 14, +}; + +struct dbc_user_param { + __u32 msg_index; + __u32 param; + __u8 signature[32]; +}; + +union dbc_buffer; + +struct psp_dbc_device { + struct device *dev; + struct psp_device *psp; + union dbc_buffer *mbox; + struct mutex ioctl_mutex; + struct miscdevice char_dev; + bool use_ext; + u32 header_size; + u32 *payload_size; + u32 *result; + void *payload; +}; + +union dbc_buffer { + struct psp_request pa_req; + struct psp_ext_request ext_req; +}; + +enum hid_report_type { + HID_INPUT_REPORT = 0, + HID_OUTPUT_REPORT = 1, + HID_FEATURE_REPORT = 2, + HID_REPORT_TYPES = 3, +}; + +enum hid_class_request { + HID_REQ_GET_REPORT = 1, + HID_REQ_GET_IDLE = 2, + HID_REQ_GET_PROTOCOL = 3, + HID_REQ_SET_REPORT = 9, + HID_REQ_SET_IDLE = 10, + HID_REQ_SET_PROTOCOL = 11, +}; + +struct hid_report; + +struct hid_report_enum; + +struct hid_device; + +struct hid_bpf_ops { + struct hid_report *(*hid_get_report)(struct hid_report_enum *, const u8 *); + int (*hid_hw_raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, + enum hid_report_type, enum hid_class_request); + struct module *owner; + struct bus_type *bus_type; +}; + +struct hid_field; + +struct hid_field_entry; + +struct hid_report { + struct list_head list; + struct list_head hidinput_list; + struct list_head field_entry_list; + unsigned int id; + enum hid_report_type type; + unsigned int application; + struct hid_field *field[256]; + struct hid_field_entry *field_entries; + unsigned int maxfield; + unsigned int size; + struct hid_device *device; + bool tool_active; + unsigned int tool; +}; + +struct hid_usage; + +struct hid_input; + +struct hid_field { + unsigned int physical; + unsigned int logical; + unsigned int application; + struct hid_usage *usage; + unsigned int maxusage; + unsigned int flags; + unsigned int report_offset; + unsigned int report_size; + unsigned int report_count; + unsigned int report_type; + __s32 *value; + __s32 *new_value; + __s32 *usages_priorities; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + 
unsigned int unit; + bool ignored; + struct hid_report *report; + unsigned int index; + struct hid_input *hidinput; + __u16 dpad; + unsigned int slot_idx; +}; + +struct hid_usage { + unsigned int hid; + unsigned int collection_index; + unsigned int usage_index; + __s8 resolution_multiplier; + __s8 wheel_factor; + __u16 code; + __u8 type; + __s8 hat_min; + __s8 hat_max; + __s8 hat_dir; + __s16 wheel_accumulated; +}; + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; + const char *name; + struct list_head reports; + unsigned int application; + bool registered; +}; + +struct hid_field_entry { + struct list_head list; + struct hid_field *field; + unsigned int index; + __s32 priority; +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE = 1, + HID_TYPE_USBNONE = 2, +}; + +struct hid_report_enum { + unsigned int numbered; + struct list_head report_list; + struct hid_report *report_id_hash[256]; +}; + +enum hid_battery_status { + HID_BATTERY_UNKNOWN = 0, + HID_BATTERY_QUERIED = 1, + HID_BATTERY_REPORTED = 2, +}; + +struct hid_bpf_prog_list; + +struct hid_bpf { + u8 *device_data; + u32 allocated_data; + struct hid_bpf_prog_list __attribute__((btf_type_tag("rcu"))) * progs[2]; + bool destroyed; + spinlock_t progs_lock; +}; + +struct hid_collection; + +struct hid_driver; + +struct hid_ll_driver; + +struct hid_device { + __u8 *dev_rdesc; + unsigned int dev_rsize; + __u8 *rdesc; + unsigned int rsize; + struct hid_collection *collection; + unsigned int collection_size; + unsigned int maxcollection; + unsigned int maxapplication; + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + __u32 version; + enum hid_type type; + unsigned int country; + struct hid_report_enum report_enum[3]; + struct work_struct led_work; + struct semaphore driver_input_lock; + struct device dev; + struct hid_driver *driver; + void *devres_group_id; + const struct hid_ll_driver *ll_driver; + struct mutex ll_open_lock; + unsigned int ll_open_count; + struct power_supply *battery; + __s32 battery_capacity; + __s32 battery_min; + __s32 battery_max; + __s32 battery_report_type; + __s32 battery_report_id; + __s32 battery_charge_status; + enum hid_battery_status battery_status; + bool battery_avoid_query; + ktime_t battery_ratelimit_time; + unsigned long status; + unsigned int claimed; + unsigned int quirks; + unsigned int initial_quirks; + bool io_started; + struct list_head inputs; + void *hiddev; + void *hidraw; + char name[128]; + char phys[64]; + char uniq[64]; + void *driver_data; + int (*ff_init)(struct hid_device *); + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, + struct hid_usage *, __s32); + void (*hiddev_report_event)(struct hid_device *, struct hid_report *); + unsigned short debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + spinlock_t debug_list_lock; + wait_queue_head_t debug_wait; + struct kref ref; + unsigned int id; + struct hid_bpf bpf; +}; + +struct hid_collection { + int parent_idx; + unsigned int type; + unsigned int usage; + unsigned int level; +}; + +struct hid_device_id; + +struct hid_report_id; + +struct hid_usage_id; + +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + struct list_head dyn_list; + spinlock_t dyn_lock; + bool (*match)(struct hid_device *, bool); + int (*probe)(struct hid_device *, const 
struct hid_device_id *); + void (*remove)(struct hid_device *); + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*report)(struct hid_device *, struct hid_report *); + __u8 *(*report_fixup)(struct hid_device *, __u8 *, unsigned int *); + int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, + struct hid_usage *, unsigned long **, int *); + int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, + struct hid_usage *, unsigned long **, int *); + int (*input_configured)(struct hid_device *, struct hid_input *); + void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); + int (*suspend)(struct hid_device *, pm_message_t); + int (*resume)(struct hid_device *); + int (*reset_resume)(struct hid_device *); + struct device_driver driver; +}; + +struct hid_device_id { + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data; +}; + +struct hid_report_id { + __u32 report_type; +}; + +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +struct hid_ll_driver { + int (*start)(struct hid_device *); + void (*stop)(struct hid_device *); + int (*open)(struct hid_device *); + void (*close)(struct hid_device *); + int (*power)(struct hid_device *, int); + int (*parse)(struct hid_device *); + void (*request)(struct hid_device *, struct hid_report *, int); + int (*wait)(struct hid_device *); + int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, + unsigned char, int); + int (*output_report)(struct hid_device *, __u8 *, size_t); + int (*idle)(struct hid_device *, int, int, int); + bool (*may_wakeup)(struct hid_device *); + unsigned int max_buffer_size; +}; + +struct hid_bpf_prog_list { + u16 prog_idx[64]; + u8 prog_cnt; +}; + +enum hid_bpf_prog_type { + HID_BPF_PROG_TYPE_UNDEF = -1, + HID_BPF_PROG_TYPE_DEVICE_EVENT = 0, + HID_BPF_PROG_TYPE_RDESC_FIXUP = 1, + HID_BPF_PROG_TYPE_MAX = 2, +}; + +enum hid_bpf_attach_flags { + HID_BPF_FLAG_NONE = 0, + HID_BPF_FLAG_INSERT_HEAD = 1, + HID_BPF_FLAG_MAX = 2, +}; + +struct hid_bpf_ctx { + __u32 index; + const struct hid_device *hid; + __u32 allocated_size; + enum hid_report_type report_type; + union { + __s32 retval; + __s32 size; + }; +}; + +struct hid_bpf_ctx_kern { + struct hid_bpf_ctx ctx; + u8 *data; +}; + +struct hid_bpf_prog_entry { + struct bpf_prog *prog; + struct hid_device *hdev; + enum hid_bpf_prog_type type; + u16 idx; +}; + +struct hid_bpf_jmp_table { + struct bpf_map *map; + struct hid_bpf_prog_entry entries[1024]; + int tail; + int head; + struct bpf_prog *progs[1024]; + unsigned long enabled[16]; +}; + +struct bpf_map_desc { + int map_fd; + __u32 max_entries; + __u64 initial_value; +}; + +struct bpf_prog_desc { + int prog_fd; +}; + +struct bpf_loader_ctx { + __u32 sz; + __u32 flags; + __u32 log_level; + __u32 log_size; + __u64 log_buf; +}; + +struct entrypoints_bpf { + struct bpf_loader_ctx ctx; + struct { + struct bpf_map_desc hid_jmp_table; + } maps; + struct { + struct bpf_prog_desc hid_tail_call; + } progs; + struct { + int hid_tail_call_fd; + } links; +}; + +enum { + BPF_SKEL_KERNEL = 1, +}; + +struct hid_bpf_link { + struct bpf_link link; + int hid_table_index; +}; + +struct bpf_load_and_run_opts { + struct bpf_loader_ctx *ctx; + const void *data; + const void *insns; + __u32 data_sz; + 
__u32 insns_sz; + const char *errstr; +}; + +struct pcc_chan_reg { + void *vaddr; + struct acpi_generic_address *gas; + u64 preserve_mask; + u64 set_mask; + u64 status_mask; +}; + +struct pcc_chan_info { + struct pcc_mbox_chan chan; + struct pcc_chan_reg db; + struct pcc_chan_reg plat_irq_ack; + struct pcc_chan_reg cmd_complete; + struct pcc_chan_reg cmd_update; + struct pcc_chan_reg error; + int plat_irq; + u8 type; + unsigned int plat_irq_flags; + bool chan_in_use; +}; + +enum acpi_pcct_type { + ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, + ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, + ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, + ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, + ACPI_PCCT_TYPE_RESERVED = 6, +}; + +struct acpi_pcct_subspace { + struct acpi_subtable_header header; + u8 reserved[6]; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +} __attribute__((packed)); + +struct acpi_table_pcct { + struct acpi_table_header header; + u32 flags; + u64 reserved; +}; + +struct acpi_pcct_hw_reduced { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +} __attribute__((packed)); + +struct acpi_pcct_ext_pcc_master { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved1; + u64 base_address; + u32 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u32 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_set_mask; + u64 reserved2; + struct acpi_generic_address cmd_complete_register; + u64 cmd_complete_mask; + struct acpi_generic_address cmd_update_register; + u64 cmd_update_preserve_mask; + u64 cmd_update_set_mask; + struct acpi_generic_address error_status_register; + u64 error_status_mask; +} __attribute__((packed)); + +struct acpi_pcct_hw_reduced_type2 { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_write_mask; +} __attribute__((packed)); + +enum { + VMGENID_SIZE = 16, +}; + +struct vmgenid_state { + u8 *next_id; + u8 this_id[16]; +}; + +struct powercap_constraint_attr { + struct device_attribute power_limit_attr; + struct device_attribute time_window_attr; + struct device_attribute max_power_attr; + struct device_attribute min_power_attr; + struct device_attribute max_time_window_attr; + struct device_attribute min_time_window_attr; + struct device_attribute name_attr; +}; + +struct powercap_control_type_ops; + +struct powercap_control_type { + struct device dev; + struct idr idr; + int nr_zones; + const struct powercap_control_type_ops *ops; + struct mutex lock; + bool allocated; + struct list_head node; +}; + +struct powercap_control_type_ops { + int (*set_enable)(struct powercap_control_type *, bool); + int (*get_enable)(struct powercap_control_type *, bool *); + int 
(*release)(struct powercap_control_type *); +}; + +struct powercap_zone_ops; + +struct powercap_zone_constraint; + +struct powercap_zone { + int id; + char *name; + void *control_type_inst; + const struct powercap_zone_ops *ops; + struct device dev; + int const_id_cnt; + struct idr idr; + struct idr *parent_idr; + void *private_data; + struct attribute **zone_dev_attrs; + int zone_attr_count; + struct attribute_group dev_zone_attr_group; + const struct attribute_group *dev_attr_groups[2]; + bool allocated; + struct powercap_zone_constraint *constraints; +}; + +struct powercap_zone_ops { + int (*get_max_energy_range_uj)(struct powercap_zone *, u64 *); + int (*get_energy_uj)(struct powercap_zone *, u64 *); + int (*reset_energy_uj)(struct powercap_zone *); + int (*get_max_power_range_uw)(struct powercap_zone *, u64 *); + int (*get_power_uw)(struct powercap_zone *, u64 *); + int (*set_enable)(struct powercap_zone *, bool); + int (*get_enable)(struct powercap_zone *, bool *); + int (*release)(struct powercap_zone *); +}; + +struct powercap_zone_constraint_ops; + +struct powercap_zone_constraint { + int id; + struct powercap_zone *power_zone; + const struct powercap_zone_constraint_ops *ops; +}; + +struct powercap_zone_constraint_ops { + int (*set_power_limit_uw)(struct powercap_zone *, int, u64); + int (*get_power_limit_uw)(struct powercap_zone *, int, u64 *); + int (*set_time_window_us)(struct powercap_zone *, int, u64); + int (*get_time_window_us)(struct powercap_zone *, int, u64 *); + int (*get_max_power_uw)(struct powercap_zone *, int, u64 *); + int (*get_min_power_uw)(struct powercap_zone *, int, u64 *); + int (*get_max_time_window_us)(struct powercap_zone *, int, u64 *); + int (*get_min_time_window_us)(struct powercap_zone *, int, u64 *); + const char *(*get_name)(struct powercap_zone *, int); +}; + +typedef void (*btf_trace_mc_event)(void *, const unsigned int, const char *, const char *, + const int, const u8, const s8, const s8, const s8, + unsigned long, const u8, unsigned long, const char *); + +typedef void (*btf_trace_arm_event)(void *, const struct cper_sec_proc_arm *); + +typedef void (*btf_trace_non_standard_event)(void *, const guid_t *, const guid_t *, + const char *, const u8, const u8 *, const u32); + +typedef void (*btf_trace_aer_event)(void *, const char *, const u32, const u8, const u8, + struct aer_header_log_regs *); + +typedef void (*btf_trace_memory_failure_event)(void *, unsigned long, int, int); + +enum hw_event_mc_err_type { + HW_EVENT_ERR_CORRECTED = 0, + HW_EVENT_ERR_UNCORRECTED = 1, + HW_EVENT_ERR_DEFERRED = 2, + HW_EVENT_ERR_FATAL = 3, + HW_EVENT_ERR_INFO = 4, +}; + +struct trace_event_raw_mc_event { + struct trace_entry ent; + unsigned int error_type; + u32 __data_loc_msg; + u32 __data_loc_label; + u16 error_count; + u8 mc_index; + s8 top_layer; + s8 middle_layer; + s8 lower_layer; + long address; + u8 grain_bits; + long syndrome; + u32 __data_loc_driver_detail; + char __data[0]; +}; + +struct trace_event_raw_arm_event { + struct trace_entry ent; + u64 mpidr; + u64 midr; + u32 running_state; + u32 psci_state; + u8 affinity; + char __data[0]; +}; + +struct trace_event_raw_non_standard_event { + struct trace_entry ent; + char sec_type[16]; + char fru_id[16]; + u32 __data_loc_fru_text; + u8 sev; + u32 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_aer_event { + struct trace_entry ent; + u32 __data_loc_dev_name; + u32 status; + u8 severity; + u8 tlp_header_valid; + u32 tlp_header[4]; + char __data[0]; +}; + +struct 
trace_event_raw_memory_failure_event { + struct trace_entry ent; + unsigned long pfn; + int type; + int result; + char __data[0]; +}; + +struct trace_event_data_offsets_non_standard_event { + u32 fru_text; + u32 buf; +}; + +struct trace_event_data_offsets_aer_event { + u32 dev_name; +}; + +struct trace_event_data_offsets_mc_event { + u32 msg; + u32 label; + u32 driver_detail; +}; + +struct trace_event_data_offsets_arm_event {}; + +struct trace_event_data_offsets_memory_failure_event {}; + +struct binder_features { + bool oneway_spam_detection; + bool extended_error; +}; + +enum binderfs_stats_mode { + binderfs_stats_mode_unset = 0, + binderfs_stats_mode_global = 1, +}; + +enum binderfs_param { + Opt_max___2 = 0, + Opt_stats_mode = 1, +}; + +enum binder_work_type { + BINDER_WORK_TRANSACTION = 1, + BINDER_WORK_TRANSACTION_COMPLETE = 2, + BINDER_WORK_TRANSACTION_PENDING = 3, + BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT = 4, + BINDER_WORK_RETURN_ERROR = 5, + BINDER_WORK_NODE = 6, + BINDER_WORK_DEAD_BINDER = 7, + BINDER_WORK_DEAD_BINDER_AND_CLEAR = 8, + BINDER_WORK_CLEAR_DEATH_NOTIFICATION = 9, +}; + +struct binderfs_device { + char name[256]; + __u32 major; + __u32 minor; +}; + +struct binderfs_mount_opts { + int max; + int stats_mode; +}; + +struct binderfs_info { + struct ipc_namespace *ipc_ns; + struct dentry *control_dentry; + kuid_t root_uid; + kgid_t root_gid; + struct binderfs_mount_opts mount_opts; + int device_count; + struct dentry *proc_log_dir; +}; + +struct binder_node; + +struct binder_context { + struct binder_node *binder_context_mgr_node; + struct mutex context_mgr_node_lock; + kuid_t binder_context_mgr_uid; + const char *name; +}; + +struct binder_device { + struct hlist_node hlist; + struct miscdevice miscdev; + struct binder_context context; + struct inode *binderfs_inode; + refcount_t ref; +}; + +struct binder_work { + struct list_head entry; + enum binder_work_type type; +}; + +typedef __u64 binder_uintptr_t; + +struct binder_proc; + +struct binder_node { + int debug_id; + spinlock_t lock; + struct binder_work work; + union { + struct rb_node rb_node; + struct hlist_node dead_node; + }; + struct binder_proc *proc; + struct hlist_head refs; + int internal_strong_refs; + int local_weak_refs; + int local_strong_refs; + int tmp_refs; + binder_uintptr_t ptr; + binder_uintptr_t cookie; + struct { + u8 has_strong_ref : 1; + u8 pending_strong_ref : 1; + u8 has_weak_ref : 1; + u8 pending_weak_ref : 1; + }; + struct { + u8 accept_fds : 1; + u8 txn_security_ctx : 1; + u8 min_priority; + }; + bool has_async_transaction; + struct list_head async_todo; +}; + +struct binder_stats { + atomic_t br[21]; + atomic_t bc[19]; + atomic_t obj_created[7]; + atomic_t obj_deleted[7]; +}; + +struct binder_lru_page; + +struct binder_alloc { + struct mutex mutex; + struct vm_area_struct *vma; + struct mm_struct *mm; + void __attribute__((btf_type_tag("user"))) * buffer; + struct list_head buffers; + struct rb_root free_buffers; + struct rb_root allocated_buffers; + size_t free_async_space; + struct binder_lru_page *pages; + size_t buffer_size; + int pid; + size_t pages_high; + bool oneway_spam_detected; +}; + +struct binder_proc { + struct hlist_node proc_node; + struct rb_root threads; + struct rb_root nodes; + struct rb_root refs_by_desc; + struct rb_root refs_by_node; + struct list_head waiting_threads; + int pid; + struct task_struct *tsk; + const struct cred *cred; + struct hlist_node deferred_work_node; + int deferred_work; + int outstanding_txns; + bool is_dead; + bool is_frozen; + bool 
sync_recv; + bool async_recv; + wait_queue_head_t freeze_wait; + struct list_head todo; + struct binder_stats stats; + struct list_head delivered_death; + int max_threads; + int requested_threads; + int requested_threads_started; + int tmp_ref; + long default_priority; + struct dentry *debugfs_entry; + struct binder_alloc alloc; + struct binder_context *context; + spinlock_t inner_lock; + spinlock_t outer_lock; + struct dentry *binderfs_entry; + bool oneway_spam_detection_enabled; +}; + +struct binder_lru_page { + struct list_head lru; + struct page *page_ptr; + struct binder_alloc *alloc; +}; + +struct binder_debugfs_entry { + const char *name; + umode_t mode; + const struct file_operations *fops; + void *data; +}; + +typedef void (*btf_trace_binder_ioctl)(void *, unsigned int, unsigned long); + +typedef void (*btf_trace_binder_lock)(void *, const char *); + +typedef void (*btf_trace_binder_locked)(void *, const char *); + +typedef void (*btf_trace_binder_unlock)(void *, const char *); + +typedef void (*btf_trace_binder_ioctl_done)(void *, int); + +typedef void (*btf_trace_binder_write_done)(void *, int); + +typedef void (*btf_trace_binder_read_done)(void *, int); + +typedef void (*btf_trace_binder_wait_for_work)(void *, bool, bool, bool); + +struct binder_transaction; + +typedef void (*btf_trace_binder_txn_latency_free)(void *, struct binder_transaction *, + int, int, int, int); + +struct binder_thread; + +struct binder_buffer; + +struct binder_transaction { + int debug_id; + struct binder_work work; + struct binder_thread *from; + pid_t from_pid; + pid_t from_tid; + struct binder_transaction *from_parent; + struct binder_proc *to_proc; + struct binder_thread *to_thread; + struct binder_transaction *to_parent; + unsigned int need_reply : 1; + struct binder_buffer *buffer; + unsigned int code; + unsigned int flags; + long priority; + long saved_priority; + kuid_t sender_euid; + ktime_t start_time; + struct list_head fd_fixups; + binder_uintptr_t security_ctx; + spinlock_t lock; +}; + +struct binder_error { + struct binder_work work; + uint32_t cmd; +}; + +struct binder_extended_error { + __u32 id; + __u32 command; + __s32 param; +}; + +struct binder_thread { + struct binder_proc *proc; + struct rb_node rb_node; + struct list_head waiting_thread_node; + int pid; + int looper; + bool looper_need_return; + struct binder_transaction *transaction_stack; + struct list_head todo; + bool process_todo; + struct binder_error return_error; + struct binder_error reply_error; + struct binder_extended_error ee; + wait_queue_head_t wait; + struct binder_stats stats; + atomic_t tmp_ref; + bool is_dead; +}; + +struct binder_buffer { + struct list_head entry; + struct rb_node rb_node; + unsigned int free : 1; + unsigned int clear_on_free : 1; + unsigned int allow_user_free : 1; + unsigned int async_transaction : 1; + unsigned int oneway_spam_suspect : 1; + unsigned int debug_id : 27; + struct binder_transaction *transaction; + struct binder_node *target_node; + size_t data_size; + size_t offsets_size; + size_t extra_buffers_size; + void __attribute__((btf_type_tag("user"))) * user_data; + int pid; +}; + +typedef void (*btf_trace_binder_transaction)(void *, bool, struct binder_transaction *, + struct binder_node *); + +typedef void (*btf_trace_binder_transaction_received)(void *, struct binder_transaction *); + +struct binder_ref_data; + +typedef void (*btf_trace_binder_transaction_node_to_ref)(void *, struct binder_transaction *, + struct binder_node *, + struct binder_ref_data *); + +struct 
binder_ref_data { + int debug_id; + uint32_t desc; + int strong; + int weak; +}; + +typedef void (*btf_trace_binder_transaction_ref_to_node)(void *, struct binder_transaction *, + struct binder_node *, + struct binder_ref_data *); + +typedef void (*btf_trace_binder_transaction_ref_to_ref)(void *, struct binder_transaction *, + struct binder_node *, + struct binder_ref_data *, + struct binder_ref_data *); + +typedef void (*btf_trace_binder_transaction_fd_send)(void *, struct binder_transaction *, + int, size_t); + +typedef void (*btf_trace_binder_transaction_fd_recv)(void *, struct binder_transaction *, + int, size_t); + +typedef void (*btf_trace_binder_transaction_alloc_buf)(void *, struct binder_buffer *); + +typedef void (*btf_trace_binder_transaction_buffer_release)(void *, struct binder_buffer *); + +typedef void (*btf_trace_binder_transaction_failed_buffer_release)(void *, + struct binder_buffer *); + +typedef void (*btf_trace_binder_transaction_update_buffer_release)(void *, + struct binder_buffer *); + +typedef void (*btf_trace_binder_update_page_range)(void *, struct binder_alloc *, bool, + void + __attribute__((btf_type_tag("user"))) *, + void + __attribute__((btf_type_tag("user"))) *); + +typedef void (*btf_trace_binder_alloc_lru_start)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_alloc_lru_end)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_free_lru_start)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_free_lru_end)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_alloc_page_start)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_alloc_page_end)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_unmap_user_start)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_unmap_user_end)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_unmap_kernel_start)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_unmap_kernel_end)(void *, const struct binder_alloc *, size_t); + +typedef void (*btf_trace_binder_command)(void *, uint32_t); + +typedef void (*btf_trace_binder_return)(void *, uint32_t); + +struct binder_transaction_log_entry { + int debug_id; + int debug_id_done; + int call_type; + int from_proc; + int from_thread; + int target_handle; + int to_proc; + int to_thread; + int to_node; + int data_size; + int offsets_size; + int return_error_line; + uint32_t return_error; + uint32_t return_error_param; + char context_name[256]; +}; + +struct binder_transaction_log { + atomic_t cur; + bool full; + struct binder_transaction_log_entry entry[32]; +}; + +enum { + BINDER_LOOPER_STATE_REGISTERED = 1, + BINDER_LOOPER_STATE_ENTERED = 2, + BINDER_LOOPER_STATE_EXITED = 4, + BINDER_LOOPER_STATE_INVALID = 8, + BINDER_LOOPER_STATE_WAITING = 16, + BINDER_LOOPER_STATE_POLL = 32, +}; + +enum binder_stat_types { + BINDER_STAT_PROC = 0, + BINDER_STAT_THREAD = 1, + BINDER_STAT_NODE = 2, + BINDER_STAT_REF = 3, + BINDER_STAT_DEATH = 4, + BINDER_STAT_TRANSACTION = 5, + BINDER_STAT_TRANSACTION_COMPLETE = 6, + BINDER_STAT_COUNT = 7, +}; + +enum binder_driver_return_protocol { + BR_ERROR = 2147774976, + BR_OK = 29185, + BR_TRANSACTION_SEC_CTX = 2152231426, + BR_TRANSACTION = 2151707138, + BR_REPLY = 2151707139, + BR_ACQUIRE_RESULT = 2147774980, + BR_DEAD_REPLY = 29189, + BR_TRANSACTION_COMPLETE = 29190, + BR_INCREFS = 2148561415, 
+ BR_ACQUIRE = 2148561416, + BR_RELEASE = 2148561417, + BR_DECREFS = 2148561418, + BR_ATTEMPT_ACQUIRE = 2149085707, + BR_NOOP = 29196, + BR_SPAWN_LOOPER = 29197, + BR_FINISHED = 29198, + BR_DEAD_BINDER = 2148037135, + BR_CLEAR_DEATH_NOTIFICATION_DONE = 2148037136, + BR_FAILED_REPLY = 29201, + BR_FROZEN_REPLY = 29202, + BR_ONEWAY_SPAM_SUSPECT = 29203, + BR_TRANSACTION_PENDING_FROZEN = 29204, +}; + +enum { + BINDER_DEBUG_USER_ERROR = 1, + BINDER_DEBUG_FAILED_TRANSACTION = 2, + BINDER_DEBUG_DEAD_TRANSACTION = 4, + BINDER_DEBUG_OPEN_CLOSE = 8, + BINDER_DEBUG_DEAD_BINDER = 16, + BINDER_DEBUG_DEATH_NOTIFICATION = 32, + BINDER_DEBUG_READ_WRITE = 64, + BINDER_DEBUG_USER_REFS = 128, + BINDER_DEBUG_THREADS = 256, + BINDER_DEBUG_TRANSACTION = 512, + BINDER_DEBUG_TRANSACTION_COMPLETE = 1024, + BINDER_DEBUG_FREE_BUFFER = 2048, + BINDER_DEBUG_INTERNAL_REFS = 4096, + BINDER_DEBUG_PRIORITY_CAP = 8192, + BINDER_DEBUG_SPINLOCKS = 16384, +}; + +enum binder_driver_command_protocol { + BC_TRANSACTION = 1077961472, + BC_REPLY = 1077961473, + BC_ACQUIRE_RESULT = 1074029314, + BC_FREE_BUFFER = 1074291459, + BC_INCREFS = 1074029316, + BC_ACQUIRE = 1074029317, + BC_RELEASE = 1074029318, + BC_DECREFS = 1074029319, + BC_INCREFS_DONE = 1074815752, + BC_ACQUIRE_DONE = 1074815753, + BC_ATTEMPT_ACQUIRE = 1074291466, + BC_REGISTER_LOOPER = 25355, + BC_ENTER_LOOPER = 25356, + BC_EXIT_LOOPER = 25357, + BC_REQUEST_DEATH_NOTIFICATION = 1074553614, + BC_CLEAR_DEATH_NOTIFICATION = 1074553615, + BC_DEAD_BINDER_DONE = 1074291472, + BC_TRANSACTION_SG = 1078485777, + BC_REPLY_SG = 1078485778, +}; + +enum { + BINDER_TYPE_BINDER = 1935813253, + BINDER_TYPE_WEAK_BINDER = 2002922117, + BINDER_TYPE_HANDLE = 1936206469, + BINDER_TYPE_WEAK_HANDLE = 2003315333, + BINDER_TYPE_FD = 1717840517, + BINDER_TYPE_FDA = 1717854597, + BINDER_TYPE_PTR = 1886661253, +}; + +enum transaction_flags { + TF_ONE_WAY = 1, + TF_ROOT_OBJECT = 4, + TF_STATUS_CODE = 8, + TF_ACCEPT_FDS = 16, + TF_CLEAR_BUF = 32, + TF_UPDATE_TXN = 64, +}; + +enum { + FLAT_BINDER_FLAG_PRIORITY_MASK = 255, + FLAT_BINDER_FLAG_ACCEPTS_FDS = 256, + FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 4096, +}; + +enum { + BINDER_BUFFER_FLAG_HAS_PARENT = 1, +}; + +enum binder_deferred_state { + BINDER_DEFERRED_FLUSH = 1, + BINDER_DEFERRED_RELEASE = 2, +}; + +struct binder_ref_death { + struct binder_work work; + binder_uintptr_t cookie; +}; + +struct binder_ref { + struct binder_ref_data data; + struct rb_node rb_node_desc; + struct rb_node rb_node_node; + struct hlist_node node_entry; + struct binder_proc *proc; + struct binder_node *node; + struct binder_ref_death *death; +}; + +struct binder_object_header { + __u32 type; +}; + +struct flat_binder_object { + struct binder_object_header hdr; + __u32 flags; + union { + binder_uintptr_t binder; + __u32 handle; + }; + binder_uintptr_t cookie; +}; + +typedef __u64 binder_size_t; + +struct binder_fd_array_object { + struct binder_object_header hdr; + __u32 pad; + binder_size_t num_fds; + binder_size_t parent; + binder_size_t parent_offset; +}; + +struct binder_task_work_cb { + struct callback_head twork; + struct file *file; +}; + +struct binder_fd_object { + struct binder_object_header hdr; + __u32 pad_flags; + union { + binder_uintptr_t pad_binder; + __u32 fd; + }; + binder_uintptr_t cookie; +}; + +struct binder_buffer_object { + struct binder_object_header hdr; + __u32 flags; + binder_uintptr_t buffer; + binder_size_t length; + binder_size_t parent; + binder_size_t parent_offset; +}; + +struct binder_ptr_fixup { + binder_size_t offset; + size_t 
skip_size; + binder_uintptr_t fixup_data; + struct list_head node; +}; + +struct binder_sg_copy { + binder_size_t offset; + const void __attribute__((btf_type_tag("user"))) * sender_uaddr; + size_t length; + struct list_head node; +}; + +struct binder_txn_fd_fixup { + struct list_head fixup_entry; + struct file *file; + size_t offset; + int target_fd; +}; + +struct trace_event_raw_binder_ioctl { + struct trace_entry ent; + unsigned int cmd; + unsigned long arg; + char __data[0]; +}; + +struct trace_event_raw_binder_lock_class { + struct trace_entry ent; + const char *tag; + char __data[0]; +}; + +struct trace_event_raw_binder_function_return_class { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_binder_wait_for_work { + struct trace_entry ent; + bool proc_work; + bool transaction_stack; + bool thread_todo; + char __data[0]; +}; + +struct trace_event_raw_binder_txn_latency_free { + struct trace_entry ent; + int debug_id; + int from_proc; + int from_thread; + int to_proc; + int to_thread; + unsigned int code; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction { + struct trace_entry ent; + int debug_id; + int target_node; + int to_proc; + int to_thread; + int reply; + unsigned int code; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction_received { + struct trace_entry ent; + int debug_id; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction_node_to_ref { + struct trace_entry ent; + int debug_id; + int node_debug_id; + binder_uintptr_t node_ptr; + int ref_debug_id; + uint32_t ref_desc; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction_ref_to_node { + struct trace_entry ent; + int debug_id; + int ref_debug_id; + uint32_t ref_desc; + int node_debug_id; + binder_uintptr_t node_ptr; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction_ref_to_ref { + struct trace_entry ent; + int debug_id; + int node_debug_id; + int src_ref_debug_id; + uint32_t src_ref_desc; + int dest_ref_debug_id; + uint32_t dest_ref_desc; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction_fd_send { + struct trace_entry ent; + int debug_id; + int fd; + size_t offset; + char __data[0]; +}; + +struct trace_event_raw_binder_transaction_fd_recv { + struct trace_entry ent; + int debug_id; + int fd; + size_t offset; + char __data[0]; +}; + +struct trace_event_raw_binder_buffer_class { + struct trace_entry ent; + int debug_id; + size_t data_size; + size_t offsets_size; + size_t extra_buffers_size; + char __data[0]; +}; + +struct trace_event_raw_binder_update_page_range { + struct trace_entry ent; + int proc; + bool allocate; + size_t offset; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_binder_lru_page_class { + struct trace_entry ent; + int proc; + size_t page_index; + char __data[0]; +}; + +struct trace_event_raw_binder_command { + struct trace_entry ent; + uint32_t cmd; + char __data[0]; +}; + +struct trace_event_raw_binder_return { + struct trace_entry ent; + uint32_t cmd; + char __data[0]; +}; + +struct binder_freeze_info { + __u32 pid; + __u32 enable; + __u32 timeout_ms; +}; + +struct binder_transaction_data { + union { + __u32 handle; + binder_uintptr_t ptr; + } target; + binder_uintptr_t cookie; + __u32 code; + __u32 flags; + __kernel_pid_t sender_pid; + __kernel_uid32_t sender_euid; + binder_size_t data_size; + binder_size_t offsets_size; + union { + struct { + binder_uintptr_t buffer; + binder_uintptr_t offsets; + } ptr; + __u8 buf[8]; + 
} data; +}; + +struct binder_transaction_data_sg { + struct binder_transaction_data transaction_data; + binder_size_t buffers_size; +}; + +struct binder_transaction_data_secctx { + struct binder_transaction_data transaction_data; + binder_uintptr_t secctx; +}; + +struct binder_object { + union { + struct binder_object_header hdr; + struct flat_binder_object fbo; + struct binder_fd_object fdo; + struct binder_buffer_object bbo; + struct binder_fd_array_object fdao; + }; +}; + +struct binder_node_info_for_ref { + __u32 handle; + __u32 strong_count; + __u32 weak_count; + __u32 reserved1; + __u32 reserved2; + __u32 reserved3; +}; + +struct binder_node_debug_info { + binder_uintptr_t ptr; + binder_uintptr_t cookie; + __u32 has_strong_ref; + __u32 has_weak_ref; +}; + +struct binder_frozen_status_info { + __u32 pid; + __u32 sync_recv; + __u32 async_recv; +}; + +struct binder_version { + __s32 protocol_version; +}; + +struct trace_event_data_offsets_binder_ioctl {}; + +struct trace_event_data_offsets_binder_lock_class {}; + +struct trace_event_data_offsets_binder_function_return_class {}; + +struct trace_event_data_offsets_binder_wait_for_work {}; + +struct trace_event_data_offsets_binder_txn_latency_free {}; + +struct trace_event_data_offsets_binder_transaction {}; + +struct trace_event_data_offsets_binder_transaction_received {}; + +struct trace_event_data_offsets_binder_transaction_node_to_ref {}; + +struct trace_event_data_offsets_binder_transaction_ref_to_node {}; + +struct trace_event_data_offsets_binder_transaction_ref_to_ref {}; + +struct trace_event_data_offsets_binder_transaction_fd_send {}; + +struct trace_event_data_offsets_binder_transaction_fd_recv {}; + +struct trace_event_data_offsets_binder_buffer_class {}; + +struct trace_event_data_offsets_binder_update_page_range {}; + +struct trace_event_data_offsets_binder_lru_page_class {}; + +struct trace_event_data_offsets_binder_command {}; + +struct trace_event_data_offsets_binder_return {}; + +struct binder_write_read { + binder_size_t write_size; + binder_size_t write_consumed; + binder_uintptr_t write_buffer; + binder_size_t read_size; + binder_size_t read_consumed; + binder_uintptr_t read_buffer; +}; + +enum { + BINDER_DEBUG_USER_ERROR___2 = 1, + BINDER_DEBUG_OPEN_CLOSE___2 = 2, + BINDER_DEBUG_BUFFER_ALLOC = 4, + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 8, +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, + NVMEM_LAYOUT_ADD = 5, + NVMEM_LAYOUT_REMOVE = 6, +}; + +struct nvmem_device { + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_cell_entry { + const char *name; + int offset; + size_t raw_len; + int bytes; + int bit_offset; + int nbits; + nvmem_cell_post_process_t read_post_process; + void *priv; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct 
list_head node; +}; + +struct nvmem_cell { + struct nvmem_cell_entry *entry; + const char *id; + int index; +}; + +struct tee_desc; + +struct tee_shm_pool; + +struct tee_device { + char name[32]; + const struct tee_desc *desc; + int id; + unsigned int flags; + struct device dev; + struct cdev cdev; + size_t num_users; + struct completion c_no_users; + struct mutex mutex; + struct idr idr; + struct tee_shm_pool *pool; +}; + +struct tee_driver_ops; + +struct tee_desc { + const char *name; + const struct tee_driver_ops *ops; + struct module *owner; + u32 flags; +}; + +struct tee_ioctl_version_data; + +struct tee_context; + +struct tee_ioctl_open_session_arg; + +struct tee_param; + +struct tee_ioctl_invoke_arg; + +struct tee_shm; + +struct tee_driver_ops { + void (*get_version)(struct tee_device *, struct tee_ioctl_version_data *); + int (*open)(struct tee_context *); + void (*release)(struct tee_context *); + int (*open_session)(struct tee_context *, struct tee_ioctl_open_session_arg *, + struct tee_param *); + int (*close_session)(struct tee_context *, u32); + int (*invoke_func)(struct tee_context *, struct tee_ioctl_invoke_arg *, + struct tee_param *); + int (*cancel_req)(struct tee_context *, u32, u32); + int (*supp_recv)(struct tee_context *, u32 *, u32 *, struct tee_param *); + int (*supp_send)(struct tee_context *, u32, u32, struct tee_param *); + int (*shm_register)(struct tee_context *, struct tee_shm *, struct page **, + size_t, unsigned long); + int (*shm_unregister)(struct tee_context *, struct tee_shm *); +}; + +struct tee_ioctl_version_data { + __u32 impl_id; + __u32 impl_caps; + __u32 gen_caps; +}; + +struct tee_context { + struct tee_device *teedev; + void *data; + struct kref refcount; + bool releasing; + bool supp_nowait; + bool cap_memref_null; +}; + +struct tee_ioctl_param { + __u64 attr; + __u64 a; + __u64 b; + __u64 c; +}; + +struct tee_ioctl_open_session_arg { + __u8 uuid[16]; + __u8 clnt_uuid[16]; + __u32 clnt_login; + __u32 cancel_id; + __u32 session; + __u32 ret; + __u32 ret_origin; + __u32 num_params; + struct tee_ioctl_param params[0]; +}; + +struct tee_param_memref { + size_t shm_offs; + size_t size; + struct tee_shm *shm; +}; + +struct tee_param_value { + u64 a; + u64 b; + u64 c; +}; + +struct tee_param { + u64 attr; + union { + struct tee_param_memref memref; + struct tee_param_value value; + } u; +}; + +struct tee_shm { + struct tee_context *ctx; + phys_addr_t paddr; + void *kaddr; + size_t size; + unsigned int offset; + struct page **pages; + size_t num_pages; + refcount_t refcount; + u32 flags; + int id; + u64 sec_world_id; +}; + +struct tee_ioctl_invoke_arg { + __u32 func; + __u32 session; + __u32 cancel_id; + __u32 ret; + __u32 ret_origin; + __u32 num_params; + struct tee_ioctl_param params[0]; +}; + +struct tee_shm_pool_ops; + +struct tee_shm_pool { + const struct tee_shm_pool_ops *ops; + void *private_data; +}; + +struct tee_shm_pool_ops { + int (*alloc)(struct tee_shm_pool *, struct tee_shm *, size_t, size_t); + void (*free)(struct tee_shm_pool *, struct tee_shm *); + void (*destroy_pool)(struct tee_shm_pool *); +}; + +struct tee_client_device_id; + +struct tee_client_driver { + const struct tee_client_device_id *id_table; + struct device_driver driver; +}; + +struct tee_client_device_id { + uuid_t uuid; +}; + +struct tee_client_device { + struct tee_client_device_id id; + struct device dev; +}; + +struct tee_ioctl_shm_register_data { + __u64 addr; + __u64 length; + __u32 flags; + __s32 id; +}; + +struct tee_ioctl_cancel_arg { + __u32 cancel_id; + __u32 
session; +}; + +struct tee_ioctl_buf_data { + __u64 buf_ptr; + __u64 buf_len; +}; + +struct tee_iocl_supp_recv_arg { + __u32 func; + __u32 num_params; + struct tee_ioctl_param params[0]; +}; + +struct tee_ioctl_close_session_arg { + __u32 session; +}; + +struct tee_iocl_supp_send_arg { + __u32 ret; + __u32 num_params; + struct tee_ioctl_param params[0]; +}; + +struct tee_ioctl_shm_alloc_data { + __u64 size; + __u32 flags; + __s32 id; +}; + +struct match_dev_data { + struct tee_ioctl_version_data *vers; + const void *data; + int (*match)(struct tee_ioctl_version_data *, const void *); +}; + +struct amdtee; + +struct amdtee_driver_data { + struct amdtee *amdtee; +}; + +struct amdtee { + struct tee_device *teedev; + struct tee_shm_pool *pool; +}; + +struct amdtee_shm_data { + struct list_head shm_node; + void *kaddr; + u32 buf_id; +}; + +struct amdtee_session { + struct list_head list_node; + u32 ta_handle; + struct kref refcount; + u32 session_info[32]; + unsigned long sess_mask[1]; + spinlock_t lock; +}; + +struct amdtee_context_data { + struct list_head sess_list; + struct list_head shm_list; + struct mutex shm_mutex; +}; + +struct shmem_desc { + void *kaddr; + u64 size; +}; + +struct amdtee_ta_data { + struct list_head list_node; + u32 ta_handle; + u32 refcount; +}; + +struct tee_cmd_unload_ta { + u32 ta_handle; +}; + +struct tee_cmd_close_session { + u32 ta_handle; + u32 session_info; +}; + +struct tee_cmd_unmap_shared_mem { + u32 buf_id; +}; + +struct memref { + u32 buf_id; + u32 offset; + u32 size; +}; + +struct value { + u32 a; + u32 b; +}; + +union tee_op_param { + struct memref mref; + struct value val; +}; + +struct tee_operation { + u32 param_types; + union tee_op_param params[4]; +}; + +struct tee_cmd_invoke_cmd { + u32 ta_handle; + u32 cmd_id; + u32 session_info; + struct tee_operation op; + u32 return_origin; +}; + +struct tee_sg_desc { + u32 low_addr; + u32 hi_addr; + u32 size; +}; + +struct tee_sg_list { + u32 count; + u32 size; + struct tee_sg_desc buf[64]; +}; + +struct tee_cmd_map_shared_mem { + u32 buf_id; + struct tee_sg_list sg_list; +}; + +struct tee_cmd_open_session { + u32 ta_handle; + u32 session_info; + struct tee_operation op; + u32 return_origin; +}; + +struct tee_cmd_load_ta { + u32 low_addr; + u32 hi_addr; + u32 size; + u32 ta_handle; + u32 return_origin; +}; + +struct net_device_devres { + struct net_device *ndev; +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_BIND_PHC = 32768, + SOF_TIMESTAMPING_OPT_ID_TCP = 65536, + SOF_TIMESTAMPING_LAST = 65536, + SOF_TIMESTAMPING_MASK = 131071, +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned 
char is_strictroute : 1; + unsigned char srr_is_hit : 1; + unsigned char is_changed : 1; + unsigned char rr_needaddr : 1; + unsigned char ts_needtime : 1; + unsigned char ts_needaddr : 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char __data[0]; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 frag_max_size; + __u16 srhoff; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats : 1; + u8 unused : 7; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u64 transmit_time; + u32 mark; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_options_rcu; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + unsigned long inet_flags; + __be32 inet_saddr; + __s16 uc_ttl; + __be16 inet_sport; + struct ip_options_rcu __attribute__((btf_type_tag("rcu"))) * inet_opt; + atomic_t inet_id; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + struct { + __u16 lo; + __u16 hi; + } local_port_range; + struct ip_mc_socklist __attribute__((btf_type_tag("rcu"))) * mc_list; + struct inet_cork_full cork; +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct ipv6_txoptions; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + const struct in6_addr *saddr_cache; + __be32 flow_label; + __u32 frag_size; + s16 hop_limit; + u8 mcast_hops; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt : 1; + __u16 osrcrt : 1; + __u16 rxinfo : 1; + __u16 rxoinfo : 1; + __u16 rxhlim : 
1; + __u16 rxohlim : 1; + __u16 hopopts : 1; + __u16 ohopopts : 1; + __u16 dstopts : 1; + __u16 odstopts : 1; + __u16 rxflow : 1; + __u16 rxtclass : 1; + __u16 rxpmtu : 1; + __u16 rxorigdstaddr : 1; + __u16 recvfragsize : 1; + } bits; + __u16 all; + } rxopt; + __u8 srcprefs; + __u8 pmtudisc; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + struct ipv6_mc_socklist __attribute__((btf_type_tag("rcu"))) * ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist __attribute__((btf_type_tag("rcu"))) * ipv6_fl_list; + struct ipv6_txoptions __attribute__((btf_type_tag("rcu"))) * opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist __attribute__((btf_type_tag("rcu"))) * next; + struct ip6_sf_socklist __attribute__((btf_type_tag("rcu"))) * sflist; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + struct in6_addr sl_addr[0]; +}; + +struct ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist __attribute__((btf_type_tag("rcu"))) * next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel __attribute__((btf_type_tag("rcu"))) * next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + unsigned long linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + unsigned long lastuse; + unsigned long expires; + struct net *fl_net; +}; + +struct ipv6_opt_hdr; + +struct ipv6_rt_hdr; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct in_addr { + __be32 s_addr; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct ip_sf_socklist; + +struct ip_mc_socklist { + struct ip_mc_socklist __attribute__((btf_type_tag("rcu"))) * next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist __attribute__((btf_type_tag("rcu"))) * sflist; + struct callback_head rcu; +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long tv_nsec; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +struct ifconf { + int ifc_len; + union { + char __attribute__((btf_type_tag("user"))) * ifcu_buf; + struct ifreq __attribute__((btf_type_tag("user"))) * ifcu_req; + } ifc_ifcu; +}; + 
+struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM = 7, + INET_FLAGS_RECVFRAGSIZE = 8, + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, + INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_MAX_STATES = 13, +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context __attribute__((btf_type_tag("rcu"))) * ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct inet_bind_bucket; + +struct inet_bind2_bucket; + +struct inet_connection_sock_af_ops; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + struct inet_bind2_bucket *icsk_bind2_hash; + unsigned long icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops *icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void __attribute__((btf_type_tag("rcu"))) * icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state : 5; + __u8 icsk_ca_initialized : 1; + __u8 icsk_ca_setsockopt : 1; + __u8 icsk_ca_dst_locked : 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 
icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato : 8; + __u32 lrcv_flowlabel : 20; + __u32 unused : 4; + unsigned long timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int search_high; + int search_low; + u32 probe_size : 31; + u32 enabled : 1; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + u64 icsk_ca_priv[18]; +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist : 5; + u8 dsack_seen : 1; + u8 advanced : 1; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct minmax_sample s[3]; +}; + +struct tcp_options_received { + int ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp : 1; + u16 tstamp_ok : 1; + u16 dsack : 1; + u16 wscale_ok : 1; + u16 sack_ok : 3; + u16 smc_ok : 1; + u16 snd_wscale : 4; + u16 rcv_wscale : 4; + u8 saw_unknown : 1; + u8 unused : 7; + u8 num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_sock_af_ops; + +struct tcp_md5sig_info; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + u16 tcp_header_len; + u16 gso_segs; + __be32 pred_flags; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_nxt; + u32 copied_seq; + u32 rcv_wup; + u32 snd_nxt; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u64 bytes_acked; + u32 dsack_dups; + u32 snd_una; + u32 snd_sml; + u32 rcv_tstamp; + u32 lsndtime; + u32 last_oow_ack_time; + u32 compressed_ack_rcv_nxt; + u32 tsoffset; + struct list_head tsq_node; + struct list_head tsorted_sent_queue; + u32 snd_wl1; + u32 snd_wnd; + u32 max_window; + u32 mss_cache; + u32 window_clamp; + u32 rcv_ssthresh; + u8 scaling_ratio; + struct tcp_rack rack; + u16 advmss; + u8 compressed_ack; + u8 dup_ack_counter : 2; + u8 tlp_retrans : 1; + u8 tcp_usec_ts : 1; + u8 fast_ack_mode : 2; + u8 tlp_orig_data_app_limited : 1; + u8 unused : 1; + u32 chrono_start; + u32 chrono_stat[3]; + u8 chrono_type : 2; + u8 rate_app_limited : 1; + u8 fastopen_connect : 1; + u8 fastopen_no_cookie : 1; + u8 is_sack_reneg : 1; + u8 fastopen_client_fail : 2; + u8 nonagle : 4; + u8 thin_lto : 1; + u8 recvmsg_inq : 1; + u8 repair : 1; + u8 frto : 1; + u8 repair_queue; + u8 save_syn : 2; + u8 syn_data : 1; + u8 syn_fastopen : 1; + u8 syn_fastopen_exp : 1; + u8 syn_fastopen_ch : 1; + u8 syn_data_acked : 1; + u8 is_cwnd_limited : 1; + u32 tlp_high_seq; + u32 tcp_tx_delay; + u64 tcp_wstamp_ns; + u64 tcp_clock_cache; + u64 tcp_mstamp; + u32 srtt_us; + u32 mdev_us; + u32 mdev_max_us; + u32 rttvar_us; + u32 rtt_seq; + struct minmax rtt_min; + u32 packets_out; + u32 retrans_out; + u32 max_packets_out; + u32 cwnd_usage_seq; + u16 urg_data; + u8 ecn_flags; + u8 keepalive_probes; + u32 reordering; + u32 reord_seen; + u32 snd_up; + struct tcp_options_received rx_opt; + u32 snd_ssthresh; + u32 snd_cwnd; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 prr_out; + u32 delivered; + u32 delivered_ce; + u32 lost; + u32 app_limited; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_wnd; + u32 write_seq; + u32 notsent_lowat; + u32 pushed_seq; + u32 lost_out; + u32 sacked_out; + struct hrtimer pacing_timer; + struct hrtimer compressed_ack_timer; + struct sk_buff 
*lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + struct rb_root out_of_order_queue; + struct sk_buff *ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + struct sk_buff *highest_sack; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + u64 bytes_retrans; + u32 total_retrans; + u32 rto_stamp; + u16 total_rto; + u16 total_rto_recoveries; + u32 total_rto_time; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u8 bpf_chg_cc_inprogress : 1; + u16 timeout_rehash; + u32 rcv_ooopack; + u32 rcv_rtt_last_tsecr; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 plb_rehash; + u32 mtu_info; + bool is_mptcp; + const struct tcp_sock_af_ops *af_specific; + struct tcp_md5sig_info __attribute__((btf_type_tag("rcu"))) * md5sig_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock __attribute__((btf_type_tag("rcu"))) * fastopen_rsk; + struct saved_syn *saved_syn; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + unsigned short port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + unsigned short fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head owners; +}; + +struct inet_bind2_bucket { + possible_net_t ib_net; + int l3mdev; + unsigned short port; + unsigned short family; + union { + struct in6_addr v6_rcv_saddr; + __be32 rcv_saddr; + }; + struct hlist_node node; + struct hlist_head owners; + struct hlist_head deathrow; +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int (*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock *(*syn_recv_sock)(const struct sock *, struct sk_buff *, + struct request_sock *, struct dst_entry *, + struct request_sock *, bool *); + u16 net_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, + char __attribute__((btf_type_tag("user"))) *, + int __attribute__((btf_type_tag("user"))) *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(const struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +struct tcp_md5sig_key; + +struct tcp_sock_af_ops { + struct tcp_md5sig_key *(*md5_lookup)(const struct sock *, const struct sock *); + int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, + const struct sk_buff *); + int (*md5_parse)(struct sock *, int, sockptr_t, int); +}; + +union tcp_ao_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_md5sig_key { + struct hlist_node 
node; + u8 keylen; + u8 family; + u8 prefixlen; + u8 flags; + union tcp_ao_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +struct tcp_md5sig_info { + struct hlist_head head; + struct callback_head rcu; +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; +}; + +struct tcp_fastopen_request { + struct tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct net_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy : 1; + unsigned int icmp_strict_tag_validation : 1; +}; + +struct udp_sock { + struct inet_sock inet; + unsigned long udp_flags; + int pending; + __u8 encap_type; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + int (*encap_rcv)(struct sock *, struct sk_buff *); + void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff *(*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct sk_buff_head reader_queue; + int forward_deficit; + int forward_threshold; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +struct so_timestamping { + int flags; + int bind_phc; +}; + +typedef unsigned short mifi_t; + +struct sioc_mif_req6 { + mifi_t mifi; + unsigned long icount; + unsigned long ocount; + unsigned long ibytes; + unsigned long obytes; +}; + +struct sockaddr_in6 { + unsigned short sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + unsigned long pktcnt; + unsigned long bytecnt; + unsigned long wrong_if; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u32 tsflags; +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale : 4; + u16 rcv_wscale : 4; + u16 tstamp_ok : 1; + u16 sack_ok : 1; + u16 wscale_ok : 1; + u16 ecn_ok : 1; + u16 acked : 1; + u16 no_srccheck : 1; + u16 smc_ok : 1; + u32 ir_mark; + union { + struct ip_options_rcu __attribute__((btf_type_tag("rcu"))) * ireq_opt; + struct { + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + u64 snt_synack; + bool tfo_listener; + bool is_mptcp; + bool req_usec_ts; + bool drop_req; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *, const struct sock *); + int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, + const struct sk_buff *); + __u32 
(*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry *(*route_req)(const struct sock *, struct sk_buff *, + struct flowi *, struct request_sock *); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, + struct request_sock *, struct tcp_fastopen_cookie *, + enum tcp_synack_type, struct sk_buff *); +}; + +struct drop_reason_list { + const char *const *reasons; + size_t n_reasons; +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +struct page_frag_1k { + void *va; + u16 offset; + bool pfmemalloc; +}; + +struct napi_alloc_cache { + struct page_frag_cache page; + struct page_frag_1k page_small; + unsigned int skb_count; + void *skb_cache[64]; +}; + +enum skb_drop_reason_subsys { + SKB_DROP_REASON_SUBSYS_CORE = 0, + SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, + SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, + SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, + SKB_DROP_REASON_SUBSYS_NUM = 4, +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + SKB_GSO_TUNNEL_REMCSUM = 8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_MPTCP = 1, + SKB_EXT_NUM = 2, +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; +}; + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info_msgzc { + struct ubuf_info ubuf; + union { + struct { + unsigned long desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy : 1; + u32 bytelen; + }; + }; + struct mmpin mmp; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; + __u32 frag_off; +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct sd_flow_limit; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + unsigned int processed; + unsigned int time_squeeze; + struct softnet_data *rps_ipi_list; + bool in_net_rx_action; + bool in_napi_threaded_poll; + struct sd_flow_limit __attribute__((btf_type_tag("rcu"))) * flow_limit; + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct { + u16 recursion; + u8 more; + u8 skip_txqueue; + } xmit; + long : 64; + long : 64; + long : 64; + unsigned int input_queue_head; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + unsigned int received_rps; + unsigned int dropped; + struct 
sk_buff_head input_pkt_queue; + struct napi_struct backlog; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + spinlock_t defer_lock; + int defer_count; + int defer_ipi_scheduled; + struct sk_buff *defer_list; + long : 64; + call_single_data_t defer_csd; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 res1 : 4; + __u16 doff : 4; + __u16 fin : 1; + __u16 syn : 1; + __u16 rst : 1; + __u16 psh : 1; + __u16 ack : 1; + __u16 urg : 1; + __u16 ece : 1; + __u16 cwr : 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +struct ipv6hdr { + __u8 priority : 4; + __u8 version : 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + union { + struct { + struct in6_addr saddr; + struct in6_addr daddr; + }; + struct { + struct in6_addr saddr; + struct in6_addr daddr; + } addrs; + }; +}; + +struct vlan_ethhdr { + union { + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + }; + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + } addrs; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +struct nf_conntrack { + refcount_t use; +}; + +struct skb_free_array { + unsigned int skb_count; + void *skb_array[16]; +}; + +struct ts_ops; + +struct ts_state; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, + struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct ts_ops { + const char *name; + struct ts_config *(*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void *(*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_state { + unsigned int offset; + char cb[48]; +}; + +typedef int (*sendmsg_func)(struct sock *, struct msghdr *); + +struct csum_state { + __wsum csum; + size_t off; +}; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +enum { + TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct pcpu_gen_cookie; + +struct gen_cookie { + struct pcpu_gen_cookie __attribute__((btf_type_tag("percpu"))) * local; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long 
: 64; + long : 64; + atomic64_t forward_last; + atomic64_t reverse_last; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct pcpu_gen_cookie { + local_t nesting; + u64 last; +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, + RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_SETSTATS = 95, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + RTM_NEWNEXTHOPBUCKET = 116, + RTM_DELNEXTHOPBUCKET = 117, + RTM_GETNEXTHOPBUCKET = 118, + RTM_NEWTUNNEL = 120, + RTM_DELTUNNEL = 121, + RTM_GETTUNNEL = 122, + __RTM_MAX = 123, +}; + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, + RTNL_FLAG_BULK_DEL_SUPPORTED = 2, +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, + RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + RTNLGRP_MCTP_IFADDR = 34, + RTNLGRP_TUNNEL = 35, + RTNLGRP_STATS = 36, + __RTNLGRP_MAX = 37, +}; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + +struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback 
*); + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 type : 4; + __u8 ver : 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct flow_dissector { + unsigned long long used_keys; + unsigned short offset[33]; +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, + FLOW_DISSECTOR_KEY_PPPOE = 29, + FLOW_DISSECTOR_KEY_L2TPV3 = 30, + FLOW_DISSECTOR_KEY_CFM = 31, + FLOW_DISSECTOR_KEY_IPSEC = 32, + FLOW_DISSECTOR_KEY_MAX = 33, +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct nf_conn; + +struct nf_ct_event { + struct nf_conn *ct; + u32 portid; + int report; +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +typedef u16 u_int16_t; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; + } u; + u_int8_t protonum; + struct { + } __nfct_hash_offsetend; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple tuple; +}; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 
vtag[2]; + u8 init[2]; + u8 last_dir; + u8 flags; +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +struct nf_ct_udp { + unsigned long stream_ts; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_zone zone; + struct nf_conntrack_tuple_hash tuplehash[2]; + unsigned long status; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct { + } __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + u_int32_t secmark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +struct nf_ct_ext { + u8 offset[7]; + u8 len; + unsigned int gen_id; + long : 0; + char data[0]; +}; + +struct nf_conntrack_expect; + +struct nf_exp_event { + struct nf_conntrack_expect *exp; + u32 portid; + int report; +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_conntrack_helper; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + refcount_t use; + unsigned int flags; + unsigned int class; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn *master; + struct timer_list timeout; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, + __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, +}; + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD = 0, + FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, + BPF_FLOW_DISSECTOR_CONTINUE = 129, +}; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, + METADATA_MACSEC = 2, + METADATA_XFRM = 3, +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + LWTUNNEL_ENCAP_IOAM6 = 9, + LWTUNNEL_ENCAP_XFRM = 10, + 
__LWTUNNEL_ENCAP_MAX = 11, +}; + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_MCAST = 5, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + __be16 tun_flags; + u8 tos; + u8 ttl; + __be32 label; + u32 nhid; + __be16 tp_src; + __be16 tp_dst; + __u8 flow_flags; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu __attribute__((btf_type_tag("percpu"))) * cache; + unsigned long reset_ts; +}; + +struct ip_tunnel_info { + struct ip_tunnel_key key; + struct ip_tunnel_encap encap; + struct dst_cache dst_cache; + u8 options_len; + u8 mode; +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +typedef u64 sci_t; + +struct macsec_info { + sci_t sci; +}; + +struct xfrm_md_info { + u32 if_id; + int link; + struct dst_entry *dst_orig; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + union { + struct ip_tunnel_info tun_info; + struct hw_port_info port_info; + struct macsec_info macsec_info; + struct xfrm_md_info xfrm_info; + } u; +}; + +struct dst_cache_pcpu { + unsigned long refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +struct mpls_label { + __be32 entry; +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl : 8; + u32 mpls_bos : 1; + u32 mpls_tc : 3; + u32 mpls_label : 20; +}; + +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +struct flow_dissector_key_cfm { + u8 mdl_ver; + u8 opcode; +}; + +struct flow_dissector_key_ipsec { + __be32 spi; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +struct ip_esp_hdr { + __be32 spi; + __be32 seq_no; + __u8 enc_data[0]; +}; + +struct flow_dissector_key_l2tpv3 { + __be32 session_id; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_basic { + 
__be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id : 12; + u16 vlan_dei : 1; + u16 vlan_priority : 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; + __be16 vlan_eth_type; + u16 padding; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + long : 0; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; + u8 l2_miss; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct nf_conn_labels { + unsigned long bits[2]; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + __be16 dst_opt_type; +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_num_of_vlans { + u8 num_of_vlans; +}; + +struct flow_dissector_key_pppoe { + __be16 session_id; + __be16 ppp_proto; + __be16 type; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +struct rps_sock_flow_table { + u32 mask; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + u32 ents[0]; +}; + +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS = 1, + XPS_MAPS_MAX = 2, +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, + __QDISC_STATE_MISSED = 2, + __QDISC_STATE_DRAINING = 3, +}; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, + NAPIF_STATE_PREFER_BUSY_POLL = 128, + NAPIF_STATE_THREADED = 256, + NAPIF_STATE_SCHED_THREADED = 512, +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, + NAPI_STATE_PREFER_BUSY_POLL = 7, + NAPI_STATE_THREADED = 8, + NAPI_STATE_SCHED_THREADED = 9, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + 
LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, + LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + 
LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, + LINUX_MIB_TCPMIGRATEREQFAILURE = 125, + LINUX_MIB_TCPPLBREHASH = 126, + LINUX_MIB_TCPAOREQUIRED = 127, + LINUX_MIB_TCPAOBAD = 128, + LINUX_MIB_TCPAOKEYNOTFOUND = 129, + LINUX_MIB_TCPAOGOOD = 130, + LINUX_MIB_TCPAODROPPEDICMPS = 131, + __LINUX_MIB_MAX = 132, +}; + +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_NUMPROTO = 11, +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_EGRESS = 1, + NF_NETDEV_NUMHOOKS = 2, +}; + +enum tcx_action_base { + TCX_NEXT = -1, + TCX_PASS = 0, + TCX_DROP = 2, + TCX_REDIRECT = 7, +}; + +enum qdisc_state2_t { + __QDISC_STATE2_RUNNING = 0, +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + netdevice_tracker dev_tracker; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, + struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + struct net *af_packet_net; + void *af_packet_priv; + struct list_head list; +}; + +struct netdev_adjacent { + struct net_device *dev; + netdevice_tracker dev_tracker; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +struct dev_kfree_skb_cb { + enum skb_drop_reason reason; +}; + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[5]; +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct netdev_notifier_offload_xstats_rd; + +struct netdev_notifier_offload_xstats_ru; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; + enum netdev_offload_xstats_type type; + union { + struct netdev_notifier_offload_xstats_rd *report_delta; + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; +}; + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +typedef int (*bpf_op_t)(struct net_device *, 
struct netdev_bpf *); + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct dpll_pin; + +struct xdp_umem; + +struct xsk_queue; + +struct xdp_buff_xsk; + +struct xdp_desc; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head free_list; + struct list_head xskb_list; + u32 heads_cnt; + u16 queue_id; + long : 64; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + struct xdp_desc *tx_descs; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 chunk_shift; + u32 frame_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool dma_need_sync; + bool unaligned; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long : 64; + long : 64; + long : 64; +}; + +struct xdp_umem { + void *addrs; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + u8 cb[24]; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + u64 orig_addr; + struct list_head free_list_node; + struct list_head xskb_list_node; +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct seg6_pernet_data { + struct mutex lock; + struct in6_addr __attribute__((btf_type_tag("rcu"))) * tun_src; + struct rhashtable hmac_infos; +}; + +struct netdev_hw_addr { + struct list_head list; + struct rb_node node; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[22]; +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, + 
NEIGH_VAR_GC_STALETIME = 8, + NEIGH_VAR_QUEUE_LEN_BYTES = 9, + NEIGH_VAR_PROXY_QLEN = 10, + NEIGH_VAR_ANYCAST_DELAY = 11, + NEIGH_VAR_PROXY_DELAY = 12, + NEIGH_VAR_LOCKTIME = 13, + NEIGH_VAR_QUEUE_LEN = 14, + NEIGH_VAR_RETRANS_TIME_MS = 15, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, + NEIGH_VAR_GC_INTERVAL = 17, + NEIGH_VAR_GC_THRESH1 = 18, + NEIGH_VAR_GC_THRESH2 = 19, + NEIGH_VAR_GC_THRESH3 = 20, + NEIGH_VAR_MAX = 21, +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES = 3, + NEIGH_LINK_TABLE = 3, +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + NDA_FLAGS_EXT = 15, + NDA_NDM_STATE_MASK = 16, + NDA_NDM_FLAGS_MASK = 17, + __NDA_MAX = 18, +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + NDTPA_INTERVAL_PROBE_TIME_MS = 19, + __NDTPA_MAX = 20, +}; + +struct neighbour_cb { + unsigned long sched_next; + unsigned int flags; +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void *(*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +enum rtattr_type_t { 
+ RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + IFLA_PARENT_DEV_NAME = 56, + IFLA_PARENT_DEV_BUS_NAME = 57, + IFLA_GRO_MAX_SIZE = 58, + IFLA_TSO_MAX_SIZE = 59, + IFLA_TSO_MAX_SEGS = 60, + IFLA_ALLMULTI = 61, + IFLA_DEVLINK_PORT = 62, + IFLA_GSO_IPV4_MAX_SIZE = 63, + IFLA_GRO_IPV4_MAX_SIZE = 64, + IFLA_DPLL_PIN = 65, + __IFLA_MAX = 66, +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + IFLA_BRIDGE_CFM = 5, + IFLA_BRIDGE_MST = 6, + __IFLA_BRIDGE_MAX = 7, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, + IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + 
IFLA_BRPORT_MRP_IN_OPEN = 36, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, + IFLA_BRPORT_LOCKED = 39, + IFLA_BRPORT_MAB = 40, + IFLA_BRPORT_MCAST_N_GROUPS = 41, + IFLA_BRPORT_MCAST_MAX_GROUPS = 42, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, + IFLA_BRPORT_BACKUP_NHID = 44, + __IFLA_BRPORT_MAX = 45, +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, + IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, + IFLA_OFFLOAD_XSTATS_L3_STATS = 3, + __IFLA_OFFLOAD_XSTATS_MAX = 4, +}; + +enum rtnl_kinds { + RTNL_KIND_NEW = 0, + RTNL_KIND_DEL = 1, + RTNL_KIND_GET = 2, + RTNL_KIND_SET = 3, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, +}; + +enum { + IFLA_STATS_GETSET_UNSPEC = 0, + IFLA_STATS_GET_FILTERS = 1, + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, + __IFLA_STATS_GETSET_MAX = 3, +}; + +enum { + MDBA_GET_ENTRY_UNSPEC = 0, + MDBA_GET_ENTRY = 1, + MDBA_GET_ENTRY_ATTRS = 2, + __MDBA_GET_ENTRY_MAX = 3, +}; + +enum { + MDBA_SET_ENTRY_UNSPEC = 0, + 
MDBA_SET_ENTRY = 1, + MDBA_SET_ENTRY_ATTRS = 2, + __MDBA_SET_ENTRY_MAX = 3, +}; + +struct rtnl_af_ops { + struct list_head list; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct net_device *, const struct nlattr *, + struct netlink_ext_ack *); + int (*set_link_af)(struct net_device *, const struct nlattr *, + struct netlink_ext_ack *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + unsigned short ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +struct rtnl_offload_xstats_request_used { + bool request; + bool used; +}; + +struct rtnl_newlink_tbs { + struct nlattr *tb[66]; + struct nlattr *attr[51]; + struct nlattr *slave_attr[45]; +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +struct br_port_msg { + __u8 family; + __u32 ifindex; +}; + +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +struct rtnl_stats_dump_filters { + u32 mask[6]; +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +struct rtnl_mdb_dump_ctx { + long idx; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct br_mdb_entry { + __u32 ifindex; + __u8 state; + __u8 flags; + __u16 vid; + struct { + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } u; + __be16 proto; + } addr; +}; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock *(*udp6_lib_lookup)(struct net *, const struct in6_addr *, __be16, + const struct in6_addr *, __be16, int, int, + struct udp_table *, struct sk_buff *); + int 
(*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); + int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, + const struct in6_addr *, unsigned int, struct in6_addr *); +}; + +struct bpf_scratchpad { + union { + __be32 diff[128]; + u8 buff[512]; + }; +}; + +enum { + BPF_F_NEIGH = 2, + BPF_F_PEER = 4, + BPF_F_NEXTHOP = 8, +}; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_INGRESS = 1, +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTREQUESTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + IPSTATS_MIB_OUTPKTS = 37, + __IPSTATS_MIB_MAX = 38, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +struct xdp_sock { + struct sock sk; + long : 64; + long : 64; + long : 64; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + bool sg; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + long : 64; + struct xsk_queue *tx; + struct list_head tx_list; + u32 tx_budget_spent; + spinlock_t rx_lock; + u64 rx_dropped; + u64 rx_queue_full; + struct sk_buff *skb; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue *fq_tmp; + struct xsk_queue *cq_tmp; +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_TUNINFO_FLAGS = 16, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + 
BPF_F_SEQ_NUMBER = 8, + BPF_F_NO_TUNNEL_KEY = 16, +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, + BPF_FIB_LOOKUP_SKIP_NEIGH = 4, + BPF_FIB_LOOKUP_TBID = 8, + BPF_FIB_LOOKUP_SRC = 16, +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + IPV4_DEVCONF_ACCEPT_LOCAL = 23, + IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, + __IPV4_DEVCONF_MAX = 34, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS = 0, + BPF_MTU_CHK_RET_FRAG_NEEDED = 1, + BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, +}; + +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = 1, +}; + +enum bpf_lwt_encap_mode { + BPF_LWT_ENCAP_SEG6 = 0, + BPF_LWT_ENCAP_SEG6_INLINE = 1, + BPF_LWT_ENCAP_IP = 2, +}; + +enum { + SEG6_LOCAL_ACTION_UNSPEC = 0, + SEG6_LOCAL_ACTION_END = 1, + SEG6_LOCAL_ACTION_END_X = 2, + SEG6_LOCAL_ACTION_END_T = 3, + SEG6_LOCAL_ACTION_END_DX2 = 4, + SEG6_LOCAL_ACTION_END_DX6 = 5, + SEG6_LOCAL_ACTION_END_DX4 = 6, + SEG6_LOCAL_ACTION_END_DT6 = 7, + SEG6_LOCAL_ACTION_END_DT4 = 8, + SEG6_LOCAL_ACTION_END_B6 = 9, + SEG6_LOCAL_ACTION_END_B6_ENCAP = 10, + SEG6_LOCAL_ACTION_END_BM = 11, + SEG6_LOCAL_ACTION_END_S = 12, + SEG6_LOCAL_ACTION_END_AS = 13, + SEG6_LOCAL_ACTION_END_AM = 14, + SEG6_LOCAL_ACTION_END_BPF = 15, + SEG6_LOCAL_ACTION_END_DT46 = 16, + __SEG6_LOCAL_ACTION_MAX = 
17, +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum { + BPF_SKB_TSTAMP_UNSPEC = 0, + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, + void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +struct bpf_redir_neigh; + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 
(*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +struct bpf_tunnel_key; + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + union { + __u16 tunnel_ext; + __be16 tunnel_flags; + }; + __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; +}; + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, + u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); + +typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 
(*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +struct bpf_fib_lookup; + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + union { + __u16 tot_len; + __u16 mtu_result; + }; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + union { + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + __u32 tbid; + }; + __u8 smac[6]; + __u8 dmac[6]; +}; + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_store_bytes)(struct sk_buff *, u32, const void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_action)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_adjust_srh)(struct sk_buff *, u32, s32); + +struct bpf_sock_tuple; + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, + u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, + u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, + u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, + u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, + u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, + u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, + struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, + struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, + struct bpf_sock_tuple 
*, u32, u64, u64); + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, + u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *, struct tcphdr *); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, + void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, + void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_sock_from_file)(struct file *); + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +struct strp_msg { + int full_len; + int offset; +}; + +struct tls_strparser { + struct sock *sk; + u32 mark : 8; + u32 stopped : 1; + u32 copy_mode : 1; + u32 mixed_decrypted : 1; + u32 msg_ready : 1; + struct strp_msg stm; + struct sk_buff *anchor; + struct work_struct work; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + u8 reader_present; + u8 async_capable : 1; + u8 zc_capable : 1; + u8 reader_contended : 1; + struct tls_strparser strp; + atomic_t decrypt_pending; + spinlock_t decrypt_compl_lock; + struct sk_buff_head async_hold; + struct wait_queue_head wq; +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + 
__u16 tag; + struct in6_addr segments[0]; +}; + +struct seg6_bpf_srh_state { + struct ipv6_sr_hdr *srh; + u16 hdrlen; + bool valid; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; +}; + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + volatile unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + unsigned int tw_transparent : 1; + unsigned int tw_flowlabel : 20; + unsigned int tw_usec_ts : 1; + unsigned int tw_pad : 2; + unsigned int tw_tos : 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; + struct inet_bind2_bucket *tw_tb2; + struct hlist_node tw_bind2_node; +}; + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; + struct tcp_md5sig_key *tw_md5_key; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct cipher_context { + char iv[20]; + char rec_seq[8]; +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_chacha20_poly1305 { + struct tls_crypto_info info; + unsigned char iv[12]; + unsigned char key[32]; + unsigned char salt[0]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; + }; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf : 3; + u8 rx_conf : 3; + u8 zerocopy_sendfile : 1; + u8 rx_no_pad : 1; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device __attribute__((btf_type_tag("rcu"))) * netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool splicing_pages; + bool pending_open_record_frags; + struct mutex tx_lock; + unsigned long flags; + struct proto *sk_proto; + struct sock *sk; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + 
unsigned char type; + unsigned char scope; + u32 tclassid; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +enum hwtstamp_flags { + HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, + HWTSTAMP_FLAG_LAST = 1, + HWTSTAMP_FLAG_MASK = 1, +}; + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct fib_notifier_net { + struct list_head fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +struct xdp_frame_bulk { + int count; + void *xa; + void *q[16]; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, + FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_RX_QUEUE_MAPPING = 19, + FLOW_ACTION_WAKE = 20, + FLOW_ACTION_QUEUE = 21, + FLOW_ACTION_SAMPLE = 22, + FLOW_ACTION_POLICE = 23, + FLOW_ACTION_CT = 24, + FLOW_ACTION_CT_METADATA = 25, + FLOW_ACTION_MPLS_PUSH = 26, + FLOW_ACTION_MPLS_POP = 27, + FLOW_ACTION_MPLS_MANGLE = 28, + FLOW_ACTION_GATE = 29, + FLOW_ACTION_PPPOE_PUSH = 30, + FLOW_ACTION_JUMP = 31, + FLOW_ACTION_PIPE = 32, + FLOW_ACTION_VLAN_PUSH_ETH = 33, + FLOW_ACTION_VLAN_POP_ETH = 34, + FLOW_ACTION_CONTINUE = 
35, + NUM_FLOW_ACTIONS = 36, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +enum offload_act_command { + FLOW_ACT_REPLACE = 0, + FLOW_ACT_DESTROY = 1, + FLOW_ACT_STATS = 2, +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +typedef int +flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, + void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; +}; + +struct flow_indir_dev_info { + void *data; + struct net_device *dev; + struct Qdisc *sch; + enum tc_setup_type type; + void (*cleanup)(struct flow_block_cb *); + struct list_head list; + enum flow_block_command command; + enum flow_block_binder_type binder_type; + struct list_head *cb_list; +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; + struct list_head *cb_list_head; +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +typedef void (*action_destr)(void *); + +struct psample_group; + +struct nf_flowtable; + +struct action_gate_entry; + +struct flow_action_cookie; + +struct flow_action_entry { + enum flow_action_id id; + u32 hw_index; + unsigned long cookie; + u64 miss_cookie; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + unsigned char dst[6]; + unsigned char src[6]; + } vlan_push_eth; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u16 rx_queue; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + u32 burst; + u64 rate_bytes_ps; + u64 peakrate_bytes_ps; + u32 avrate; + u16 overhead; + u64 burst_pkt; + u64 rate_pkt_ps; + u32 mtu; + struct { + enum flow_action_id act_id; + u32 
extval; + } exceed; + struct { + enum flow_action_id act_id; + u32 extval; + } notexceed; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + unsigned long cookie; + u32 mark; + u32 labels[4]; + bool orig_dir; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + s32 prio; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + struct { + u16 sid; + } pppoe; + }; + struct flow_action_cookie *user_cookie; +}; + +struct flow_action { + unsigned int num_entries; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + struct flow_action action; +}; + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +struct flow_offload_action { + struct netlink_ext_ack *extack; + enum offload_act_command command; + enum flow_action_id id; + u32 index; + unsigned long cookie; + struct flow_stats stats; + struct flow_action action; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +struct flow_match_arp { + struct flow_dissector_key_arp *key; + struct flow_dissector_key_arp *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_dissector_key_ports_range; + +struct flow_match_ports_range { + struct flow_dissector_key_ports_range *key; + struct flow_dissector_key_ports_range *mask; +}; + +struct flow_dissector_key_ports_range { + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + }; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_ipsec { + struct flow_dissector_key_ipsec *key; + struct flow_dissector_key_ipsec *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + 
+struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +struct flow_match_pppoe { + struct flow_dissector_key_pppoe *key; + struct flow_dissector_key_pppoe *mask; +}; + +struct flow_match_l2tpv3 { + struct flow_dissector_key_l2tpv3 *key; + struct flow_dissector_key_l2tpv3 *mask; +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_CONSUMED = 4, +}; + +struct offload_callbacks { + struct sk_buff *(*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff *(*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +struct napi_gro_cb { + union { + struct { + void *frag0; + unsigned int frag0_len; + }; + struct { + struct sk_buff *last; + unsigned long age; + }; + }; + int data_offset; + u16 flush; + u16 flush_id; + u16 count; + u16 proto; + union { + struct { + u16 gro_remcsum_start; + u8 same_flow : 1; + u8 encap_mark : 1; + u8 csum_valid : 1; + u8 csum_cnt : 3; + u8 free : 2; + u8 is_ipv6 : 1; + u8 is_fou : 1; + u8 is_atomic : 1; + u8 recursion_counter : 4; + u8 is_flist : 1; + }; + struct { + u16 gro_remcsum_start; + u8 same_flow : 1; + u8 encap_mark : 1; + u8 csum_valid : 1; + u8 csum_cnt : 3; + u8 free : 2; + u8 is_ipv6 : 1; + u8 is_fou : 1; + u8 is_atomic : 1; + u8 recursion_counter : 4; + u8 is_flist : 1; + } zeroed; + }; + __wsum csum; +}; + +typedef enum gro_result gro_result_t; + +enum { + NETDEV_A_DEV_IFINDEX = 1, + NETDEV_A_DEV_PAD = 2, + NETDEV_A_DEV_XDP_FEATURES = 3, + NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES = 5, + __NETDEV_A_DEV_MAX = 6, + NETDEV_A_DEV_MAX = 5, +}; + +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + NETDEV_XDP_RX_METADATA_HASH = 2, + NETDEV_XDP_RX_METADATA_MASK = 3, +}; + +enum { + NETDEV_CMD_DEV_GET = 1, + NETDEV_CMD_DEV_ADD_NTF = 2, + NETDEV_CMD_DEV_DEL_NTF = 3, + NETDEV_CMD_DEV_CHANGE_NTF = 4, + __NETDEV_CMD_MAX = 5, + NETDEV_CMD_MAX = 4, +}; + +enum { + NETDEV_NLGRP_MGMT = 0, +}; + +struct genl_dumpit_info { + struct genl_split_ops op; + struct genl_info info; +}; + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +enum { + FR_ACT_UNSPEC = 0, + FR_ACT_TO_TBL = 1, + FR_ACT_GOTO = 2, + FR_ACT_NOP = 3, + FR_ACT_RES3 = 4, + FR_ACT_RES4 = 5, + FR_ACT_BLACKHOLE = 6, + FR_ACT_UNREACHABLE = 7, + FR_ACT_PROHIBIT = 8, + __FR_ACT_MAX = 9, +}; + +enum { + FRA_UNSPEC = 0, + FRA_DST = 1, + FRA_SRC = 2, + FRA_IIFNAME = 3, + FRA_GOTO = 4, + FRA_UNUSED2 = 5, + FRA_PRIORITY = 6, + FRA_UNUSED3 = 7, + FRA_UNUSED4 = 8, + FRA_UNUSED5 = 9, + FRA_FWMARK = 10, + FRA_FLOW = 11, + FRA_TUN_ID = 12, + FRA_SUPPRESS_IFGROUP = 13, + FRA_SUPPRESS_PREFIXLEN = 14, + FRA_TABLE = 15, + FRA_FWMASK = 16, + FRA_OIFNAME = 17, + FRA_PAD = 18, + FRA_L3MDEV = 19, + FRA_UID_RANGE = 20, + FRA_PROTOCOL = 21, + FRA_IP_PROTO = 22, + FRA_SPORT_RANGE = 23, + FRA_DPORT_RANGE = 24, + __FRA_MAX = 25, +}; + +struct fib_rule_uid_range { + __u32 
start; + __u32 end; +}; + +struct fib_rule_notifier_info { + struct fib_notifier_info info; + struct fib_rule *rule; +}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, + const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, + unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, + long, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, + const int); + +typedef void (*btf_trace_inet_sk_error_report)(void *, const struct sock *); + +typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); + +typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); + +typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, + const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, + const struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, + const struct fib_nh_common *, int); + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, + const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, + const struct netdev_queue *, struct sk_buff *); + +typedef 
void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, + struct net_device *, u32); + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, + const unsigned char *, u16, u16); + +struct net_bridge; + +struct net_bridge_port; + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, + struct net_bridge_port *, + const unsigned char *, u16); + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +struct bridge_mcast_other_query { + struct timer_list timer; + struct timer_list delay_timer; +}; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } dst; + __be16 proto; + __u16 vid; +}; + +struct bridge_mcast_querier { + struct br_ip addr; + int port_ifidx; + seqcount_spinlock_t seq; +}; + +struct net_bridge_vlan; + +struct net_bridge_mcast { + struct net_bridge *br; + struct net_bridge_vlan *vlan; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_querier; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + unsigned long multicast_last_member_interval; + unsigned long multicast_membership_interval; + unsigned long multicast_querier_interval; + unsigned long multicast_query_interval; + unsigned long multicast_query_response_interval; + unsigned long multicast_startup_query_interval; + struct hlist_head ip4_mc_router_list; + struct timer_list ip4_mc_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct hlist_head ip6_mc_router_list; + struct timer_list ip6_mc_router_timer; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct bridge_mcast_querier ip6_querier; +}; + +struct bridge_mcast_stats; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct hlist_head frame_type_list; + struct net_device *dev; + unsigned long options; + struct rhashtable fdb_hash_tbl; + struct list_head port_list; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + unsigned long max_age; + unsigned long hello_time; + unsigned long forward_delay; + unsigned long ageing_time; + unsigned long bridge_max_age; + unsigned long bridge_hello_time; + unsigned long bridge_forward_delay; + unsigned long bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + struct net_bridge_mcast multicast_ctx; + struct bridge_mcast_stats __attribute__((btf_type_tag("percpu"))) * mcast_stats; + u32 hash_max; + spinlock_t multicast_lock; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct hlist_head mdb_list; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject 
*ifobj; + u32 auto_cnt; + atomic_t fdb_n_learned; + u32 fdb_max_learned; + struct hlist_head fdb_list; + struct hlist_head mrp_list; +}; + +struct net_bridge_mcast_port { + struct net_bridge_port *port; + struct net_bridge_vlan *vlan; + struct bridge_mcast_own_query ip4_own_query; + struct timer_list ip4_mc_router_timer; + struct hlist_node ip4_rlist; + struct bridge_mcast_own_query ip6_own_query; + struct timer_list ip6_mc_router_timer; + struct hlist_node ip6_rlist; + unsigned char multicast_router; + u32 mdb_n_entries; + u32 mdb_max_entries; +}; + +struct br_tunnel_info { + __be64 tunnel_id; + struct metadata_dst __attribute__((btf_type_tag("rcu"))) * tunnel_dst; +}; + +struct net_bridge_vlan { + struct rhash_head vnode; + struct rhash_head tnode; + u16 vid; + u16 flags; + u16 priv_flags; + u8 state; + struct pcpu_sw_netstats __attribute__((btf_type_tag("percpu"))) * stats; + union { + struct net_bridge *br; + struct net_bridge_port *port; + }; + union { + refcount_t refcnt; + struct net_bridge_vlan *brvlan; + }; + struct br_tunnel_info tinfo; + union { + struct net_bridge_mcast br_mcast_ctx; + struct net_bridge_mcast_port port_mcast_ctx; + }; + u16 msti; + struct list_head vlist; + struct callback_head rcu; +}; + +typedef __u16 port_id; + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + unsigned long flags; + struct net_bridge_port __attribute__((btf_type_tag("rcu"))) * backup_port; + u32 backup_nhid; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + unsigned long designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct net_bridge_mcast_port multicast_ctx; + struct bridge_mcast_stats __attribute__((btf_type_tag("percpu"))) * mcast_stats; + u32 multicast_eht_hosts_limit; + u32 multicast_eht_hosts_cnt; + struct hlist_head mglist; + char sysfs_name[16]; + u16 group_fwd_mask; + u16 backup_redirected_cnt; + struct bridge_stp_xstats stp_xstats; +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + __u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; +}; + +struct net_bridge_fdb_entry; + +typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, + struct net_bridge_fdb_entry *); + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + unsigned long flags; + long : 64; + long : 64; + unsigned long updated; + unsigned long used; + struct 
callback_head rcu; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, + const unsigned char *, u16, unsigned long); + +typedef void (*btf_trace_br_mdb_full)(void *, const struct net_device *, const struct br_ip *); + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, + const struct page *, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, + const struct page *, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, + const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + unsigned short protocol; + enum skb_drop_reason reason; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; 
+ u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long sysctl_mem[3]; + long allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_inet_sk_error_report { + struct trace_entry ent; + int error; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_sk_data_ready { + struct trace_entry ent; + const void *skaddr; + __u16 family; + __u16 protocol; + unsigned long ip; + char __data[0]; +}; + +struct trace_event_raw_sock_msg_length { + struct trace_entry ent; + void *sk; + __u16 family; + __u16 protocol; + int ret; + int flags; + char __data[0]; +}; + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 lport; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_skb { + struct trace_entry ent; + const void *skbaddr; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_cong_state_set { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u8 cong_state; + char __data[0]; +}; + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + char name[16]; + char __data[0]; +}; + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + unsigned long txq_state; + char 
__data[0]; +}; + +struct trace_event_raw_qdisc_enqueue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + unsigned long flags; + char __data[0]; +}; + +struct trace_event_raw_br_mdb_full { + struct trace_entry ent; + u32 __data_loc_dev; + int af; + u16 vid; + __u8 src[16]; + __u8 grp[16]; + __u8 grpmac[6]; + char __data[0]; +}; + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + const struct page *page; + u32 release; + unsigned long pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + const struct page *page; + u32 hold; + unsigned long pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + unsigned long confirmed; + unsigned long updated; + unsigned long used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + unsigned long confirmed; + unsigned long updated; + unsigned long used; + u32 err; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; +}; + +struct 
trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; +}; + +struct trace_event_data_offsets_br_mdb_full { + u32 dev; +}; + +struct trace_event_data_offsets_neigh_create { + u32 dev; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + u32 driver; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +struct trace_event_data_offsets_inet_sk_error_report {}; + +struct trace_event_data_offsets_sk_data_ready {}; + +struct trace_event_data_offsets_sock_msg_length {}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +struct trace_event_data_offsets_tcp_event_skb {}; + +struct trace_event_data_offsets_tcp_cong_state_set {}; + +struct trace_event_data_offsets_fib_table_lookup {}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_enqueue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_qdisc_create { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +enum net_dm_alert_mode { + NET_DM_ALERT_MODE_SUMMARY = 0, + NET_DM_ALERT_MODE_PACKET = 1, +}; + +struct devlink_trap_metadata; + +struct net_dm_alert_ops { + void (*kfree_skb_probe)(void *, struct sk_buff *, void *, enum skb_drop_reason); + void (*napi_poll_probe)(void *, struct napi_struct *, int, int); + void (*work_item_func)(struct work_struct *); + void (*hw_work_item_func)(struct work_struct *); + void (*hw_trap_probe)(void *, const struct devlink *, struct sk_buff *, + const struct devlink_trap_metadata *); +}; + +enum devlink_trap_type { + DEVLINK_TRAP_TYPE_DROP = 0, + DEVLINK_TRAP_TYPE_EXCEPTION = 1, + DEVLINK_TRAP_TYPE_CONTROL = 2, +}; + +struct devlink_trap_metadata { + const char *trap_name; + const char *trap_group_name; + struct net_device *input_dev; + netdevice_tracker dev_tracker; + const struct flow_action_cookie *fa_cookie; + enum devlink_trap_type trap_type; +}; + +struct net_dm_stats { + u64_stats_t dropped; + struct u64_stats_sync syncp; +}; + +struct net_dm_hw_entries; + +struct per_cpu_dm_data { + spinlock_t lock; + union { + struct sk_buff *skb; + struct net_dm_hw_entries *hw_entries; + }; + struct sk_buff_head 
drop_queue; + struct work_struct dm_alert_work; + struct timer_list send_timer; + struct net_dm_stats stats; +}; + +struct net_dm_hw_entry { + char trap_name[40]; + u32 count; +}; + +struct net_dm_hw_entries { + u32 num_entries; + struct net_dm_hw_entry entries[0]; +}; + +enum net_dm_attr { + NET_DM_ATTR_UNSPEC = 0, + NET_DM_ATTR_ALERT_MODE = 1, + NET_DM_ATTR_PC = 2, + NET_DM_ATTR_SYMBOL = 3, + NET_DM_ATTR_IN_PORT = 4, + NET_DM_ATTR_TIMESTAMP = 5, + NET_DM_ATTR_PROTO = 6, + NET_DM_ATTR_PAYLOAD = 7, + NET_DM_ATTR_PAD = 8, + NET_DM_ATTR_TRUNC_LEN = 9, + NET_DM_ATTR_ORIG_LEN = 10, + NET_DM_ATTR_QUEUE_LEN = 11, + NET_DM_ATTR_STATS = 12, + NET_DM_ATTR_HW_STATS = 13, + NET_DM_ATTR_ORIGIN = 14, + NET_DM_ATTR_HW_TRAP_GROUP_NAME = 15, + NET_DM_ATTR_HW_TRAP_NAME = 16, + NET_DM_ATTR_HW_ENTRIES = 17, + NET_DM_ATTR_HW_ENTRY = 18, + NET_DM_ATTR_HW_TRAP_COUNT = 19, + NET_DM_ATTR_SW_DROPS = 20, + NET_DM_ATTR_HW_DROPS = 21, + NET_DM_ATTR_FLOW_ACTION_COOKIE = 22, + NET_DM_ATTR_REASON = 23, + __NET_DM_ATTR_MAX = 24, + NET_DM_ATTR_MAX = 23, +}; + +enum { + NET_DM_CMD_UNSPEC = 0, + NET_DM_CMD_ALERT = 1, + NET_DM_CMD_CONFIG = 2, + NET_DM_CMD_START = 3, + NET_DM_CMD_STOP = 4, + NET_DM_CMD_PACKET_ALERT = 5, + NET_DM_CMD_CONFIG_GET = 6, + NET_DM_CMD_CONFIG_NEW = 7, + NET_DM_CMD_STATS_GET = 8, + NET_DM_CMD_STATS_NEW = 9, + _NET_DM_CMD_MAX = 10, +}; + +enum net_dm_origin { + NET_DM_ORIGIN_SW = 0, + NET_DM_ORIGIN_HW = 1, +}; + +enum { + NET_DM_ATTR_PORT_NETDEV_IFINDEX = 0, + NET_DM_ATTR_PORT_NETDEV_NAME = 1, + __NET_DM_ATTR_PORT_MAX = 2, + NET_DM_ATTR_PORT_MAX = 1, +}; + +enum { + NET_DM_ATTR_STATS_DROPPED = 0, + __NET_DM_ATTR_STATS_MAX = 1, + NET_DM_ATTR_STATS_MAX = 0, +}; + +struct net_dm_skb_cb { + union { + struct devlink_trap_metadata *hw_metadata; + void *pc; + }; + enum skb_drop_reason reason; +}; + +struct net_dm_drop_point { + __u8 pc[8]; + __u32 count; +}; + +struct net_dm_alert_msg { + __u32 entries; + struct net_dm_drop_point points[0]; +}; + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, + struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +struct rtnexthop { + unsigned short rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +enum { + LWT_BPF_UNSPEC = 0, + LWT_BPF_IN = 1, + LWT_BPF_OUT = 2, + LWT_BPF_XMIT = 3, + LWT_BPF_XMIT_HEADROOM = 4, + __LWT_BPF_MAX = 5, +}; + +enum { + LWT_BPF_PROG_UNSPEC = 0, + LWT_BPF_PROG_FD = 1, + LWT_BPF_PROG_NAME = 2, + __LWT_BPF_PROG_MAX = 3, +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 256, +}; + +struct bpf_lwt_prog { + struct bpf_prog *prog; + char *name; +}; + +struct bpf_lwt { + struct bpf_lwt_prog in; + struct bpf_lwt_prog out; + struct bpf_lwt_prog xmit; + int family; +}; + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +struct percpu_free_defer { + struct callback_head rcu; + void __attribute__((btf_type_tag("percpu"))) * ptr; +}; + +struct gro_cells { + struct gro_cell __attribute__((btf_type_tag("percpu"))) * cells; +}; + +enum 
netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + NETDEV_LAG_TX_TYPE_RANDOM = 1, + NETDEV_LAG_TX_TYPE_BROADCAST = 2, + NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, + NETDEV_LAG_TX_TYPE_HASH = 5, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE = 0, + NETDEV_LAG_HASH_L2 = 1, + NETDEV_LAG_HASH_L34 = 2, + NETDEV_LAG_HASH_L23 = 3, + NETDEV_LAG_HASH_E23 = 4, + NETDEV_LAG_HASH_E34 = 5, + NETDEV_LAG_HASH_VLAN_SRCMAC = 6, + NETDEV_LAG_HASH_UNKNOWN = 7, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, + SK_PSOCK_RX_STRP_ENABLED = 1, +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + BTF_SOCK_TYPE_UNIX = 13, + BTF_SOCK_TYPE_MPTCP = 14, + BTF_SOCK_TYPE_SOCKET = 15, + MAX_BTF_SOCK_TYPE = 16, +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, + void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, + void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + spinlock_t lock; + long : 64; + long : 64; +}; + +struct bpf_shtab_bucket; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; + long : 64; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + spinlock_t lock; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, 
void *, + u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +struct compat_cmsghdr { + compat_size_t cmsg_len; + compat_int_t cmsg_level; + compat_int_t cmsg_type; +}; + +struct fcllc { + __u8 dsap; + __u8 ssap; + __u8 llc; + __u8 protid[3]; + __be16 ethertype; +}; + +struct fch_hdr { + __u8 daddr[6]; + __u8 saddr[6]; +}; + +struct hippi_fp_hdr { + __be32 fixed; + __be32 d2_size; +}; + +struct hippi_le_hdr { + __u8 message_type : 4; + __u8 double_wide : 1; + __u8 fc : 3; + __u8 dest_switch_addr[3]; + __u8 src_addr_type : 4; + __u8 dest_addr_type : 4; + __u8 src_switch_addr[3]; + __u16 reserved; + __u8 daddr[6]; + __u16 locally_administered; + __u8 saddr[6]; +}; + +struct hippi_snap_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl; + __u8 oui[3]; + __be16 ethertype; +}; + +struct hippi_hdr { + struct hippi_fp_hdr fp; + struct hippi_le_hdr le; + struct hippi_snap_hdr snap; +}; + +struct hippi_cb { + __u32 ifield; +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + TCA_EXT_WARN_MSG = 16, + __TCA_MAX = 17, +}; + +struct skb_array { + struct ptr_ring ring; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u16 mpu; + u8 linklayer; + u8 shift; +}; + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + unsigned short overhead; + short cell_align; + unsigned short mpu; + __u32 rate; +}; + +struct psched_pktrate { + u64 rate_pkts_ps; + u32 mult; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc __attribute__((btf_type_tag("rcu"))) * *p_miniq; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_sync *bstats; + struct gnet_stats_queue *qstats; +}; + +struct tc_mq_opt_offload_graft_params { + unsigned long queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +struct sch_frag_data { + unsigned long dst; + struct qdisc_skb_cb cb; + __be16 inner_protocol; + u16 vlan_tci; + __be16 vlan_proto; + unsigned int l2_len; + u8 l2_data[18]; + int (*xmit)(struct sk_buff *); +}; + +struct tc_skb_cb { + struct qdisc_skb_cb qdisc_cb; + u16 mru; + u8 post_ct : 1; + u8 post_ct_snat : 1; + u8 post_ct_dnat : 1; + u16 zone; +}; + +typedef void (*btf_trace_netlink_extack)(void *, const char *); + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners __attribute__((btf_type_tag("rcu"))) * listeners; 
+ unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, unsigned long *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + unsigned long masks[0]; +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +enum { + NETLINK_F_KERNEL_SOCKET = 0, + NETLINK_F_RECV_PKTINFO = 1, + NETLINK_F_BROADCAST_SEND_ERROR = 2, + NETLINK_F_RECV_NO_ENOBUFS = 3, + NETLINK_F_LISTEN_ALL_NSID = 4, + NETLINK_F_CAP_ACK = 5, + NETLINK_F_EXT_ACK = 6, + NETLINK_F_STRICT_CHK = 7, +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + NLMSGERR_ATTR_MISS_TYPE = 5, + NLMSGERR_ATTR_MISS_NEST = 6, + __NLMSGERR_ATTR_MAX = 7, + NLMSGERR_ATTR_MAX = 6, +}; + +struct trace_event_raw_netlink_extack { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct netlink_sock { + struct sock sk; + unsigned long flags; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 subscriptions; + u32 ngroups; + unsigned long *groups; + unsigned long state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex *cb_mutex; + struct mutex cb_def_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + void (*netlink_release)(struct sock *, unsigned long *); + struct module *module; + struct rhash_head node; + struct callback_head rcu; + struct work_struct work; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + unsigned short nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct trace_event_data_offsets_netlink_extack { + u32 msg; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock *exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct nl_pktinfo { + __u32 group; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 11, +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +enum { + 
CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + __CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_op_iter { + const struct genl_family *family; + struct genl_split_ops doit; + struct genl_split_ops dumpit; + int cmd_idx; + int entry_idx; + u32 cmd; + u8 flags; +}; + +struct netlink_policy_dump_state; + +struct ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + struct genl_op_iter *op_iter; + u32 op; + u16 fam_id; + u8 dump_map : 1; + u8 single_op : 1; +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_split_ops *ops; + int hdrlen; +}; + +struct netlink_policy_dump_state { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, + NL_ATTR_TYPE_SINT = 16, + NL_ATTR_TYPE_UINT = 17, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_test_timer { + enum { + NO_PREEMPT = 0, + NO_MIGRATE = 1, + } mode; + u32 i; + u64 time_start; + u64 time_spent; +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + struct xdp_mem_info mem; + u32 batch_size; + u32 frame_cnt; + 
long : 64; + long : 64; +}; + +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + union { + struct { + struct { + } __empty_frame; + struct xdp_frame frame[0]; + }; + struct { + struct { + } __empty_data; + u8 data[0]; + }; + }; +}; + +struct trace_event_data_offsets_bpf_test_finish {}; + +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct bpf_dummy_ops_test_args { + u64 args[12]; + struct bpf_dummy_ops_state state; +}; + +typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + 
ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + __ETHTOOL_LINK_MODE_MASK_NBITS = 102, +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + ETHTOOL_MSG_FEC_GET_REPLY = 30, + ETHTOOL_MSG_FEC_NTF = 31, + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, + ETHTOOL_MSG_STATS_GET_REPLY = 33, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, + ETHTOOL_MSG_MODULE_GET_REPLY = 35, + ETHTOOL_MSG_MODULE_NTF = 36, + ETHTOOL_MSG_PSE_GET_REPLY = 37, + ETHTOOL_MSG_RSS_GET_REPLY = 38, + 
ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, + ETHTOOL_MSG_PLCA_NTF = 41, + ETHTOOL_MSG_MM_GET_REPLY = 42, + ETHTOOL_MSG_MM_NTF = 43, + __ETHTOOL_MSG_KERNEL_CNT = 44, + ETHTOOL_MSG_KERNEL_MAX = 43, +}; + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_STATS_STD = 16, + ETH_SS_STATS_ETH_PHY = 17, + ETH_SS_STATS_ETH_MAC = 18, + ETH_SS_STATS_ETH_CTRL = 19, + ETH_SS_STATS_RMON = 20, + ETH_SS_COUNT = 21, +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, + __ETHTOOL_TUNABLE_COUNT = 5, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT = 0, + ETHTOOL_FEC_AUTO_BIT = 1, + ETHTOOL_FEC_OFF_BIT = 2, + ETHTOOL_FEC_RS_BIT = 3, + ETHTOOL_FEC_BASER_BIT = 4, + ETHTOOL_FEC_LLRS_BIT = 5, +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + unsigned long priv[0]; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, + struct netlink_ext_ack *); + int (*get_plca_status)(struct 
phy_device *, struct phy_plca_status *); + int (*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, + const struct phy_tdr_config *); +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[4]; + __u32 advertising[4]; + __u32 lp_advertising[4]; + } link_modes; +}; + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +} __attribute__((packed)); + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +} __attribute__((packed)); + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +struct ethtool_forced_speed_map { + u32 speed; + unsigned long caps[2]; + const u32 *cap_arr; + u32 arr_size; +}; + +typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +struct ethnl_req_info; + +struct ethnl_reply_data; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + u8 set_ntf_cmd; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, + struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, + const struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, + const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); + int (*set_validate)(struct ethnl_req_info *, struct genl_info *); + int (*set)(struct ethnl_req_info *, struct genl_info *); +}; + +struct ethnl_req_info { + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + __ETHTOOL_A_HEADER_CNT = 4, + ETHTOOL_A_HEADER_MAX = 3, +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct 
ethnl_reply_data *reply_data; + unsigned long pos_ifindex; +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +typedef const char (*const ethnl_string_array_t)[32]; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[21]; +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER = 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + ETHTOOL_A_LINKMODES_LANES = 9, + ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, + __ETHTOOL_A_LINKMODES_CNT = 11, + ETHTOOL_A_LINKMODES_MAX = 10, +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +enum { + ETHTOOL_A_RSS_UNSPEC = 0, + ETHTOOL_A_RSS_HEADER = 1, + ETHTOOL_A_RSS_CONTEXT = 2, + ETHTOOL_A_RSS_HFUNC = 3, + ETHTOOL_A_RSS_INDIR = 4, + ETHTOOL_A_RSS_HKEY = 5, + __ETHTOOL_A_RSS_CNT = 6, + ETHTOOL_A_RSS_MAX = 5, +}; + +struct rss_req_info { + struct ethnl_req_info base; + u32 rss_context; +}; + +struct rss_reply_data { + struct ethnl_reply_data base; + u32 indir_size; + u32 hkey_size; + u32 hfunc; + u32 *indir_table; + u8 *hkey; +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + 
ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, + __ETHTOOL_A_LINKSTATE_CNT = 8, + ETHTOOL_A_LINKSTATE_MAX = 7, +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + struct ethtool_link_ext_stats link_stats; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + NETIF_MSG_CLASS_COUNT = 15, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, + ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + ETHTOOL_A_RINGS_RX_BUF_LEN = 10, + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, + ETHTOOL_A_RINGS_CQE_SIZE = 12, + ETHTOOL_A_RINGS_TX_PUSH = 13, + ETHTOOL_A_RINGS_RX_PUSH = 14, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, + __ETHTOOL_A_RINGS_CNT = 17, + ETHTOOL_A_RINGS_MAX = 16, +}; + +enum ethtool_supported_ring_param { + ETHTOOL_RING_USE_RX_BUF_LEN = 1, + ETHTOOL_RING_USE_CQE_SIZE = 2, + ETHTOOL_RING_USE_TX_PUSH = 4, + ETHTOOL_RING_USE_RX_PUSH = 8, + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + 
ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT = 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, + ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, + __ETHTOOL_A_COALESCE_CNT = 29, + ETHTOOL_A_COALESCE_MAX = 28, +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + struct kernel_ethtool_coalesce kernel_coalesce; + u32 supported_params; +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + ETHTOOL_A_PAUSE_STATS_SRC = 6, + __ETHTOOL_A_PAUSE_CNT = 7, + ETHTOOL_A_PAUSE_MAX = 6, +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +struct pause_req_info { + struct ethnl_req_info base; + enum ethtool_mac_stats_src src; +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + struct ethtool_pause_stats pausestat; +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_eee eee; +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + __ETHTOOL_A_TSINFO_CNT = 6, + ETHTOOL_A_TSINFO_MAX = 5, +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_ts_info ts_info; +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + 
ETHTOOL_A_CABLE_TEST_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + __ETHTOOL_A_CABLE_RESULT_CNT = 3, + ETHTOOL_A_CABLE_RESULT_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 3, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT 
= 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + unsigned long ifindex; +}; + +enum { + ETHTOOL_A_FEC_UNSPEC = 0, + ETHTOOL_A_FEC_HEADER = 1, + ETHTOOL_A_FEC_MODES = 2, + ETHTOOL_A_FEC_AUTO = 3, + ETHTOOL_A_FEC_ACTIVE = 4, + ETHTOOL_A_FEC_STATS = 5, + __ETHTOOL_A_FEC_CNT = 6, + ETHTOOL_A_FEC_MAX = 5, +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC = 0, + ETHTOOL_A_FEC_STAT_PAD = 1, + ETHTOOL_A_FEC_STAT_CORRECTED = 2, + ETHTOOL_A_FEC_STAT_UNCORR = 3, + ETHTOOL_A_FEC_STAT_CORR_BITS = 4, + __ETHTOOL_A_FEC_STAT_CNT = 5, + ETHTOOL_A_FEC_STAT_MAX = 4, +}; + +struct fec_stat_grp { + u64 stats[9]; + u8 cnt; +}; + +struct fec_reply_data { + struct ethnl_reply_data base; + unsigned long fec_link_modes[2]; + u32 active_fec; + u8 fec_auto; + struct fec_stat_grp corr; + struct fec_stat_grp uncorr; + struct fec_stat_grp corr_bits; +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, + ETHTOOL_A_MODULE_EEPROM_HEADER = 1, + ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, + ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, + ETHTOOL_A_MODULE_EEPROM_PAGE = 4, + ETHTOOL_A_MODULE_EEPROM_BANK = 5, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, + ETHTOOL_A_MODULE_EEPROM_DATA = 7, + __ETHTOOL_A_MODULE_EEPROM_CNT = 8, + ETHTOOL_A_MODULE_EEPROM_MAX = 7, +}; + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + +struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +enum { + ETHTOOL_STATS_ETH_PHY = 0, + ETHTOOL_STATS_ETH_MAC = 1, + ETHTOOL_STATS_ETH_CTRL = 2, + ETHTOOL_STATS_RMON = 3, + __ETHTOOL_STATS_CNT = 4, +}; + +enum { + ETHTOOL_A_STATS_UNSPEC = 0, + ETHTOOL_A_STATS_PAD = 1, + ETHTOOL_A_STATS_HEADER = 2, + ETHTOOL_A_STATS_GROUPS = 3, + ETHTOOL_A_STATS_GRP = 4, + ETHTOOL_A_STATS_SRC = 5, + __ETHTOOL_A_STATS_CNT = 6, + ETHTOOL_A_STATS_MAX = 5, +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC = 0, + ETHTOOL_A_STATS_GRP_PAD = 1, + ETHTOOL_A_STATS_GRP_ID = 2, + ETHTOOL_A_STATS_GRP_SS_ID = 3, + ETHTOOL_A_STATS_GRP_STAT = 4, + ETHTOOL_A_STATS_GRP_HIST_RX = 5, + ETHTOOL_A_STATS_GRP_HIST_TX = 6, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, + ETHTOOL_A_STATS_GRP_HIST_VAL = 9, + __ETHTOOL_A_STATS_GRP_CNT = 10, + ETHTOOL_A_STATS_GRP_MAX = 9, +}; + +enum { + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, + __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, + ETHTOOL_A_STATS_ETH_PHY_MAX = 0, +}; + +enum { + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, + __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, + 
ETHTOOL_A_STATS_ETH_MAC_MAX = 21, +}; + +enum { + ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, + ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, + __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, + ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, +}; + +enum { + ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, + ETHTOOL_A_STATS_RMON_OVERSIZE = 1, + ETHTOOL_A_STATS_RMON_FRAG = 2, + ETHTOOL_A_STATS_RMON_JABBER = 3, + __ETHTOOL_A_STATS_RMON_CNT = 4, + ETHTOOL_A_STATS_RMON_MAX = 3, +}; + +struct stats_req_info { + struct ethnl_req_info base; + unsigned long stat_mask[1]; + enum ethtool_mac_stats_src src; +}; + +struct stats_reply_data { + struct ethnl_reply_data base; + union { + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + }; + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + } stats; + }; + const struct ethtool_rmon_hist_range *rmon_ranges; +}; + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, + ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, + ETHTOOL_A_PHC_VCLOCKS_NUM = 2, + ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, + __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, + ETHTOOL_A_PHC_VCLOCKS_MAX = 3, +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +enum { + ETHTOOL_A_MM_STAT_UNSPEC = 0, + ETHTOOL_A_MM_STAT_PAD = 1, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, + ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, + ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, + __ETHTOOL_A_MM_STAT_CNT = 8, + ETHTOOL_A_MM_STAT_MAX = 7, +}; + +enum { + ETHTOOL_A_MM_UNSPEC = 0, + ETHTOOL_A_MM_HEADER = 1, + ETHTOOL_A_MM_PMAC_ENABLED = 2, + ETHTOOL_A_MM_TX_ENABLED = 3, + ETHTOOL_A_MM_TX_ACTIVE = 4, + ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, + ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, + ETHTOOL_A_MM_VERIFY_ENABLED = 7, + ETHTOOL_A_MM_VERIFY_STATUS = 8, + ETHTOOL_A_MM_VERIFY_TIME = 9, + ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, + ETHTOOL_A_MM_STATS = 11, + __ETHTOOL_A_MM_CNT = 12, + ETHTOOL_A_MM_MAX = 11, +}; + +struct mm_reply_data { + struct ethnl_reply_data base; + struct ethtool_mm_state state; + struct ethtool_mm_stats stats; +}; + +enum { + ETHTOOL_A_MODULE_UNSPEC = 0, + ETHTOOL_A_MODULE_HEADER = 1, + ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, + ETHTOOL_A_MODULE_POWER_MODE = 3, + __ETHTOOL_A_MODULE_CNT = 4, + ETHTOOL_A_MODULE_MAX = 3, +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +enum ethtool_podl_pse_admin_state { + ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_podl_pse_pw_d_status { + ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, + ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, + ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, +}; + +enum { + ETHTOOL_A_PSE_UNSPEC = 0, + ETHTOOL_A_PSE_HEADER = 1, + ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, + ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, + ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, + __ETHTOOL_A_PSE_CNT = 5, + ETHTOOL_A_PSE_MAX = 4, +}; + +struct pse_control_status { + enum ethtool_podl_pse_admin_state podl_admin_state; + enum 
ethtool_podl_pse_pw_d_status podl_pw_status; +}; + +struct pse_reply_data { + struct ethnl_reply_data base; + struct pse_control_status status; +}; + +struct pse_control_config { + enum ethtool_podl_pse_admin_state admin_cotrol; +}; + +enum { + ETHTOOL_A_PLCA_UNSPEC = 0, + ETHTOOL_A_PLCA_HEADER = 1, + ETHTOOL_A_PLCA_VERSION = 2, + ETHTOOL_A_PLCA_ENABLED = 3, + ETHTOOL_A_PLCA_STATUS = 4, + ETHTOOL_A_PLCA_NODE_CNT = 5, + ETHTOOL_A_PLCA_NODE_ID = 6, + ETHTOOL_A_PLCA_TO_TMR = 7, + ETHTOOL_A_PLCA_BURST_CNT = 8, + ETHTOOL_A_PLCA_BURST_TMR = 9, + __ETHTOOL_A_PLCA_CNT = 10, + ETHTOOL_A_PLCA_MAX = 9, +}; + +struct plca_reply_data { + struct ethnl_reply_data base; + struct phy_plca_cfg plca_cfg; + struct phy_plca_status plca_st; +}; + +struct nf_queue_entry; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, + int (*)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct nfnl_ct_hook { + size_t (*build_size)(const struct nf_conn *); + int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, + u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn *); + int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); +}; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); + void (*attach)(struct sk_buff *, const struct sk_buff *); + void (*set_closing)(struct nf_conntrack *); +}; + +struct nf_defrag_hook { + struct module *owner; + int (*enable)(struct net *); + void (*disable)(struct net *); +}; + +enum nf_nat_manip_type; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn *, + enum nf_nat_manip_type, enum ip_conntrack_dir); + void (*remove_nat_bysrc)(struct nf_conn *); +}; + +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED = 0, + NF_HOOK_OP_NF_TABLES = 1, + NF_HOOK_OP_BPF = 2, +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u8 pf; + enum nf_hook_ops_type hook_ops_type : 8; + unsigned int hooknum; + int priority; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto : 8; + u8 pkt_otherhost : 1; + u8 in_prerouting : 1; + u8 bridged_dnat : 1; + u8 sabotage_in_done : 1; + __u16 frag_max_size; + int physinif; + struct net_device *physoutdev; + union { + __be32 
ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_sockopt_ops { + struct list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, int, void __attribute__((btf_type_tag("user"))) *, int *); + struct module *owner; +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = -2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP_PRI_CONNTRACK_DEFRAG = -400, + NF_IP_PRI_RAW = -300, + NF_IP_PRI_SELINUX_FIRST = -225, + NF_IP_PRI_CONNTRACK = -200, + NF_IP_PRI_MANGLE = -150, + NF_IP_PRI_NAT_DST = -100, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +struct bpf_nf_link { + struct bpf_link link; + struct nf_hook_ops hook_ops; + struct net *net; + u32 dead; + const struct nf_defrag_hook *defrag_hook; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; + struct list_head quarantine; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + unsigned long sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +typedef u8 dscp_t; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + dscp_t fa_dscp; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload; + u8 trap; + u8 offload_failed; + struct callback_head rcu; +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + unsigned long rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + unsigned int rtm_flags; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + dscp_t dscp; + u8 type; + u8 offload : 1; + u8 trap : 1; + u8 offload_failed : 1; + u8 unused : 5; +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; +}; + +struct raw_hashinfo { + spinlock_t lock; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + struct hlist_head ht[256]; +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + 
IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + IP_DEFRAG_MACVLAN = 196614, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, + INET_FRAG_DROP = 16, +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY = 0, + XFRM_REPLAY_MODE_BMP = 1, + XFRM_REPLAY_MODE_ESN = 2, +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 addr; + int oif; + struct ip_options_rcu *opt; + __u8 protocol; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +enum { + BPFILTER_IPT_SO_SET_REPLACE = 64, + BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65, + BPFILTER_IPT_SET_MAX = 66, +}; + +enum { + BPFILTER_IPT_SO_GET_INFO = 64, + BPFILTER_IPT_SO_GET_ENTRIES = 65, + BPFILTER_IPT_SO_GET_REVISION_MATCH = 66, + BPFILTER_IPT_SO_GET_REVISION_TARGET = 67, + BPFILTER_IPT_GET_MAX = 68, +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + union { + __be32 imsf_slist[1]; + struct { + struct { + } __empty_imsf_slist_flex; + __be32 imsf_slist_flex[0]; + }; + }; +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +} __attribute__((packed)); + +struct group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +} __attribute__((packed)); + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct compat_group_filter { + union { + struct { + __u32 gf_interface_aux; + struct 
__kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + } __attribute__((packed)); + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + } __attribute__((packed)); + }; +}; + +typedef u32 +inet_ehashfn_t(const struct net *, const __be32, const __u16, const __be32, const __be16); + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; + +struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; + __u32 bbr_bw_hi_lsb; + __u32 bbr_bw_hi_msb; + __u32 bbr_bw_lo_lsb; + __u32 bbr_bw_lo_msb; + __u8 bbr_mode; + __u8 bbr_phase; + __u8 unused1; + __u8 bbr_version; + __u32 bbr_inflight_lo; + __u32 bbr_inflight_hi; + __u32 bbr_extra_acked; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, + TCP_ACK_DEFERRED = 6, +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16, + ICSK_ACK_NOMEM = 32, +}; + +enum { + TCP_CMSG_INQ = 1, + TCP_CMSG_TS = 2, +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_MAX_STATES = 13, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, + TCP_NLA_TTL = 26, + TCP_NLA_REHASH = 27, +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + __u32 tcp_tw_isn; + struct { + u16 tcp_gso_segs; + 
u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack : 1; + __u8 eor : 1; + __u8 has_rxtstamp : 1; + __u8 unused : 5; + __u32 ack_seq; + union { + struct { + __u32 is_app_limited : 1; + __u32 delivered_ce : 20; + __u32 unused : 11; + __u32 delivered; + u32 first_tx_mstamp; + u32 delivered_mstamp; + u32 in_flight : 20; + u32 unused2 : 12; + u32 lost; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + }; +}; + +struct tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale : 4; + __u8 tcpi_rcv_wscale : 4; + __u8 tcpi_delivery_rate_app_limited : 1; + __u8 tcpi_fastopen_client_fail : 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; + __u32 tcpi_rcv_wnd; + __u32 tcpi_rehash; + __u16 tcpi_total_rto; + __u16 tcpi_total_rto_recoveries; + __u32 tcpi_total_rto_time; +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; + __u64 copybuf_address; + __s32 copybuf_len; + __u32 flags; + __u64 msg_control; + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; + __u32 rcv_wup; +}; + +struct tcp_sigpool { + void *scratch; + struct ahash_request *req; +}; + +enum { + TCP_FLAG_CWR = 32768, + TCP_FLAG_ECE = 16384, + TCP_FLAG_URG = 8192, + TCP_FLAG_ACK = 4096, + TCP_FLAG_PSH = 2048, + TCP_FLAG_RST = 1024, + TCP_FLAG_SYN = 512, + TCP_FLAG_FIN = 256, + TCP_RESERVED_BITS = 15, + TCP_DATA_OFFSET = 240, +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; +}; + +struct mptcp_ext { + union { + u64 data_ack; + u32 
data_ack32; + }; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + __sum16 csum; + u8 use_map : 1; + u8 dsn64 : 1; + u8 data_fin : 1; + u8 use_ack : 1; + u8 ack64 : 1; + u8 mpc_map : 1; + u8 frozen : 1; + u8 reset_transient : 1; + u8 reset_reason : 4; + u8 csum_reqd : 1; + u8 infinite_map : 1; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, + TCPF_ACK_DEFERRED = 64, +}; + +struct tcp_ao_key; + +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5 = 1, + TCP_KEY_AO = 2, + } type; +}; + +struct tcp_ao_key { + struct hlist_node node; + union tcp_ao_addr addr; + u8 key[80]; + unsigned int tcp_sigpool_id; + unsigned int digest_size; + int l3index; + u8 prefixlen; + u8 family; + u8 keylen; + u8 keyflags; + u8 sndid; + u8 rcvid; + u8 maclen; + struct callback_head rcu; + atomic64_t pkt_good; + atomic64_t pkt_bad; + u8 traffic_keys[0]; +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +struct mptcp_addr_info { + u8 id; + sa_family_t family; + __be16 port; + union { + struct in_addr addr; + struct in6_addr addr6; + }; +}; + +struct mptcp_rm_list { + u8 ids[8]; + u8 nr; +}; + +struct mptcp_out_options { + u16 suboptions; + struct mptcp_rm_list rm_list; + u8 join_id; + u8 backup; + u8 reset_reason : 4; + u8 reset_transient : 1; + u8 csum_reqd : 1; + u8 allow_join_id0 : 1; + union { + struct { + u64 sndr_key; + u64 rcvr_key; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + __sum16 csum; + }; + struct { + struct mptcp_addr_info addr; + u64 ahmac; + }; + struct { + struct mptcp_ext ext_copy; + u64 fail_seq; + }; + struct { + u32 nonce; + u32 token; + u64 thmac; + u8 hmac[20]; + }; + }; +}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct static_key_false_deferred { + struct static_key_false key; + unsigned long timeout; + struct delayed_work work; +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + ICMP_MIB_RATELIMITGLOBAL = 28, + ICMP_MIB_RATELIMITHOST = 29, + __ICMP_MIB_MAX = 30, +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + 
TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_ao_hdr { + u8 kind; + u8 length; + u8 keyid; + u8 rnext_keyid; +}; + +struct tcp4_pseudohdr { + __be32 saddr; + __be32 daddr; + __u8 pad; + __u8 protocol; + __be16 len; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + int bucket; + int offset; + int sbucket; + int num; + loff_t last_pos; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; +}; + +struct bpf_tcp_iter_state { + struct tcp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct tcp_md5sig { + struct __kernel_sockaddr_storage tcpm_addr; + __u8 tcpm_flags; + __u8 tcpm_prefixlen; + __u16 tcpm_keylen; + int tcpm_ifindex; + __u8 tcpm_key[80]; +}; + +struct tcp_metrics_block; + +struct tcpm_hash_bucket { + struct tcp_metrics_block __attribute__((btf_type_tag("rcu"))) * chain; +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss : 10; + u16 try_exp : 2; + unsigned long last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block __attribute__((btf_type_tag("rcu"))) * tcpm_next; + struct net *tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + unsigned long tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +struct tcp_plb_state { + u8 consec_cong_rounds : 5; + u8 unused : 3; + u32 pause_until; +} __attribute__((packed)); + +struct icmp_filter { + __u32 data; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +enum { + UDP_FLAGS_CORK = 0, + UDP_FLAGS_NO_CHECK6_TX = 1, + UDP_FLAGS_NO_CHECK6_RX = 2, + UDP_FLAGS_GRO_ENABLED = 3, + UDP_FLAGS_ACCEPT_FRAGLIST = 4, + UDP_FLAGS_ACCEPT_L4 = 5, + UDP_FLAGS_ENCAP_ENABLED = 6, + UDP_FLAGS_UDPLITE_SEND_CC = 7, + UDP_FLAGS_UDPLITE_RECV_CC = 8, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, + UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + UDP_MIB_MEMERRORS = 9, + __UDP_MIB_MAX = 10, +}; + +struct 
udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct udp_dev_scratch { + u32 _tsize_state; + u16 len; + bool is_linear; + bool csum_unnecessary; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + long : 0; + int bucket; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct bpf_udp_iter_state { + struct udp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + int offset; + struct sock **batch; + bool st_bucket_done; +}; + +struct inet_protosw { + struct list_head list; + unsigned short type; + unsigned short protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); + +typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, + struct sk_buff *); + +typedef struct sock *(*udp_lookup_t)(const struct sk_buff *, __be16, __be16); + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +struct icmp_err { + int errno; + unsigned int fatal : 1; +}; + +struct icmp_control { + enum skb_drop_reason (*handler)(struct sk_buff *); + short error; +}; + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_ext_hdr { + __u8 reserved1 : 4; + __u8 version : 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[16]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[34]; +}; + +enum { + 
IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + IFA_PROTO = 11, + __IFA_MAX = 12, +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct rtentry { + unsigned long rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + unsigned short rt_flags; + short rt_pad2; + unsigned long rt_pad3; + void *rt_pad4; + short rt_metric; + char __attribute__((btf_type_tag("user"))) * rt_dev; + unsigned long rt_mtu; + unsigned long rt_window; + unsigned short rt_irtt; +}; + +struct compat_rtentry { + u32 rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + unsigned short rt_flags; + short rt_pad2; + u32 rt_pad3; + unsigned char rt_tos; + unsigned char rt_class; + short rt_pad4; + short rt_metric; + compat_uptr_t rt_dev; + u32 rt_mtu; + u32 rt_window; + unsigned short rt_irtt; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; + __u8 qrv : 3; + __u8 suppress : 1; + __u8 resv : 4; + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify : 1; + u8 skip_notify_kernel : 1; +}; + +struct fib_config { + u8 fc_dst_len; + dscp_t fc_dscp; + u8 fc_protocol; + u8 fc_scope; + u8 fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct fib_result_nl { + __be32 fl_addr; + 
u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 fc_delete_all_nh : 1; + u16 fc_ignore_dev_down : 1; + u16 __unused : 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct in6_addr fc_gateway; + unsigned long fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct { + struct { + } __empty_tnode; + struct key_vector __attribute__((btf_type_tag("rcu"))) * tnode[0]; + }; + }; +}; + +struct trie { + struct key_vector kv[1]; +}; + +struct tnode { + struct callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector __attribute__((btf_type_tag("rcu"))) * parent; + struct key_vector kv[1]; +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + dscp_t dscp; + u8 type; + u32 tb_id; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int prefixes; + unsigned int nodesizes[32]; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + loff_t pos; + t_key key; +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; +}; + +struct ping_table { + struct hlist_head hash[64]; + spinlock_t lock; +}; + +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, + struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, + const struct net_device *, int); +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 reserved : 5; + __u32 override : 1; + __u32 solicited : 1; + __u32 router : 1; + __u32 reserved2 : 24; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 reserved : 3; + __u8 router_pref : 2; + __u8 home_agent : 1; + __u8 other : 1; + __u8 managed : 1; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +struct 
ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +enum { + IFLA_IPTUN_UNSPEC = 0, + IFLA_IPTUN_LINK = 1, + IFLA_IPTUN_LOCAL = 2, + IFLA_IPTUN_REMOTE = 3, + IFLA_IPTUN_TTL = 4, + IFLA_IPTUN_TOS = 5, + IFLA_IPTUN_ENCAP_LIMIT = 6, + IFLA_IPTUN_FLOWINFO = 7, + IFLA_IPTUN_FLAGS = 8, + IFLA_IPTUN_PROTO = 9, + IFLA_IPTUN_PMTUDISC = 10, + IFLA_IPTUN_6RD_PREFIX = 11, + IFLA_IPTUN_6RD_RELAY_PREFIX = 12, + IFLA_IPTUN_6RD_PREFIXLEN = 13, + IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, + IFLA_IPTUN_ENCAP_TYPE = 15, + IFLA_IPTUN_ENCAP_FLAGS = 16, + IFLA_IPTUN_ENCAP_SPORT = 17, + IFLA_IPTUN_ENCAP_DPORT = 18, + IFLA_IPTUN_COLLECT_METADATA = 19, + IFLA_IPTUN_FWMARK = 20, + __IFLA_IPTUN_MAX = 21, +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, + LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 hwid_upper : 2; + __u8 ft : 5; + __u8 p : 1; + __u8 o : 1; + __u8 gra : 2; + __u8 dir : 1; + __u8 hwid : 4; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 length : 5; + u8 r3 : 1; + u8 r2 : 1; + u8 r1 : 1; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, + NEXTHOP_EVENT_REPLACE = 1, + NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, + NEXTHOP_EVENT_BUCKET_REPLACE = 3, +}; + +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE = 0, + NH_NOTIFIER_INFO_TYPE_GRP = 1, + NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + NHA_RES_GROUP = 12, + NHA_RES_BUCKET = 13, + __NHA_MAX = 14, +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + NEXTHOP_GRP_TYPE_RES = 1, + __NEXTHOP_GRP_TYPE_MAX = 2, +}; + +enum 
{ + NHA_RES_GROUP_UNSPEC = 0, + NHA_RES_GROUP_PAD = 0, + NHA_RES_GROUP_BUCKETS = 1, + NHA_RES_GROUP_IDLE_TIMER = 2, + NHA_RES_GROUP_UNBALANCED_TIMER = 3, + NHA_RES_GROUP_UNBALANCED_TIME = 4, + __NHA_RES_GROUP_MAX = 5, +}; + +enum { + NHA_RES_BUCKET_UNSPEC = 0, + NHA_RES_BUCKET_PAD = 0, + NHA_RES_BUCKET_INDEX = 1, + NHA_RES_BUCKET_IDLE_TIME = 2, + NHA_RES_BUCKET_NH_ID = 3, + __NHA_RES_BUCKET_MAX = 4, +}; + +struct nh_notifier_single_info; + +struct nh_notifier_grp_info; + +struct nh_notifier_res_table_info; + +struct nh_notifier_res_bucket_info; + +struct nh_notifier_info { + struct net *net; + struct netlink_ext_ack *extack; + u32 id; + enum nh_notifier_info_type type; + union { + struct nh_notifier_single_info *nh; + struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; + }; +}; + +struct nh_notifier_single_info { + struct net_device *dev; + u8 gw_family; + union { + __be32 ipv4; + struct in6_addr ipv6; + }; + u8 is_reject : 1; + u8 is_fdb : 1; + u8 has_encap : 1; +}; + +struct nh_notifier_grp_entry_info { + u8 weight; + u32 id; + struct nh_notifier_single_info nh; +}; + +struct nh_notifier_grp_info { + u16 num_nh; + bool is_fdb; + struct nh_notifier_grp_entry_info nh_entries[0]; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + struct nh_notifier_single_info nhs[0]; +}; + +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + unsigned long nh_grp_res_idle_timer; + unsigned long nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 resvd1; + __u16 resvd2; +}; + +struct nh_dump_filter { + u32 nh_id; + int dev_idx; + int master_idx; + bool group_filter; + bool fdb_filter; + u32 res_bucket_nh_id; +}; + +struct rtm_dump_nh_ctx { + u32 idx; +}; + +struct rtm_dump_res_bucket_ctx { + struct rtm_dump_nh_ctx nh; + u16 bucket_index; +}; + +struct rtm_dump_nexthop_bucket_data { + struct rtm_dump_res_bucket_ctx *ctx; + struct nh_dump_filter filter; +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, + struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +struct bpfilter_umh_ops { + struct umd_info info; + struct mutex lock; + int (*sockopt)(struct sock *, int, sockptr_t, unsigned int, bool); + int (*start)(); +}; + +struct snmp_mib { + const char *name; + int entry; +}; + +struct fib4_rule { 
+ struct fib_rule common; + u8 dst_len; + u8 src_len; + dscp_t dscp; + __be32 src; + __be32 srcmask; + __be32 dst; + __be32 dstmask; +}; + +struct mr_table_ops { + const struct rhashtable_params *rht_params; + void *cmparg_any; +}; + +struct mfc_cache_cmp_arg { + __be32 mfc_mcastgrp; + __be32 mfc_origin; +}; + +enum { + IPMRA_CREPORT_UNSPEC = 0, + IPMRA_CREPORT_MSGTYPE = 1, + IPMRA_CREPORT_VIF_ID = 2, + IPMRA_CREPORT_SRC_ADDR = 3, + IPMRA_CREPORT_DST_ADDR = 4, + IPMRA_CREPORT_PKT = 5, + IPMRA_CREPORT_TABLE = 6, + __IPMRA_CREPORT_MAX = 7, +}; + +enum { + MFC_STATIC = 1, + MFC_OFFLOAD = 2, +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP = 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +enum { + IPMRA_TABLE_UNSPEC = 0, + IPMRA_TABLE_ID = 1, + IPMRA_TABLE_CACHE_RES_QUEUE_LEN = 2, + IPMRA_TABLE_MROUTE_REG_VIF_NUM = 3, + IPMRA_TABLE_MROUTE_DO_ASSERT = 4, + IPMRA_TABLE_MROUTE_DO_PIM = 5, + IPMRA_TABLE_VIFS = 6, + IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE = 7, + __IPMRA_TABLE_MAX = 8, +}; + +enum { + IPMRA_VIF_UNSPEC = 0, + IPMRA_VIF = 1, + __IPMRA_VIF_MAX = 2, +}; + +enum { + IPMRA_VIFA_UNSPEC = 0, + IPMRA_VIFA_IFINDEX = 1, + IPMRA_VIFA_VIF_ID = 2, + IPMRA_VIFA_FLAGS = 3, + IPMRA_VIFA_BYTES_IN = 4, + IPMRA_VIFA_BYTES_OUT = 5, + IPMRA_VIFA_PACKETS_IN = 6, + IPMRA_VIFA_PACKETS_OUT = 7, + IPMRA_VIFA_LOCAL_ADDR = 8, + IPMRA_VIFA_REMOTE_ADDR = 9, + IPMRA_VIFA_PAD = 10, + __IPMRA_VIFA_MAX = 11, +}; + +typedef unsigned short vifi_t; + +struct sioc_vif_req { + vifi_t vifi; + unsigned long icount; + unsigned long ocount; + unsigned long ibytes; + unsigned long obytes; +}; + +struct sioc_sg_req { + struct in_addr src; + struct in_addr grp; + unsigned long pktcnt; + unsigned long bytecnt; + unsigned long wrong_if; +}; + +struct vif_device { + struct net_device __attribute__((btf_type_tag("rcu"))) * dev; + netdevice_tracker dev_tracker; + unsigned long bytes_in; + unsigned long bytes_out; + unsigned long pkt_in; + unsigned long pkt_out; + unsigned long rate_limit; + unsigned char threshold; + unsigned short flags; + int link; + struct netdev_phys_item_id dev_parent_id; + __be32 local; + __be32 remote; +}; + +struct mr_table { + struct list_head list; + possible_net_t net; + struct mr_table_ops ops; + u32 id; + struct sock __attribute__((btf_type_tag("rcu"))) * mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct vif_device vif_table[32]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + bool mroute_do_wrvifwhole; + int mroute_reg_vif_num; +}; + +struct igmpmsg { + __u32 unused1; + __u32 unused2; + unsigned char im_msgtype; + unsigned char im_mbz; + unsigned char im_vif; + unsigned char im_vif_hi; + struct in_addr im_src; + struct in_addr im_dst; +}; + +struct mr_mfc { + struct rhlist_head mnode; + unsigned short mfc_parent; + int mfc_flags; + union { + struct { + unsigned long expires; + struct sk_buff_head unresolved; + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned long lastuse; + unsigned char ttls[32]; + refcount_t refcount; + } res; + } mfc_un; + struct list_head list; + struct callback_head rcu; + void (*free)(struct callback_head *); +}; + +struct mfc_cache { + struct mr_mfc _c; + union { + 
struct { + __be32 mfc_mcastgrp; + __be32 mfc_origin; + }; + struct mfc_cache_cmp_arg cmparg; + }; +}; + +struct pimreghdr { + __u8 type; + __u8 reserved; + __be16 csum; + __be32 flags; +}; + +struct vifctl { + vifi_t vifc_vifi; + unsigned char vifc_flags; + unsigned char vifc_threshold; + unsigned int vifc_rate_limit; + union { + struct in_addr vifc_lcl_addr; + int vifc_lcl_ifindex; + }; + struct in_addr vifc_rmt_addr; +}; + +struct vif_entry_notifier_info { + struct fib_notifier_info info; + struct net_device *dev; + unsigned short vif_index; + unsigned short vif_flags; + u32 tb_id; +}; + +struct mfc_entry_notifier_info { + struct fib_notifier_info info; + struct mr_mfc *mfc; + u32 tb_id; +}; + +struct ipmr_result { + struct mr_table *mrt; +}; + +struct mfcctl { + struct in_addr mfcc_origin; + struct in_addr mfcc_mcastgrp; + vifi_t mfcc_parent; + unsigned char mfcc_ttls[32]; + unsigned int mfcc_pkt_cnt; + unsigned int mfcc_byte_cnt; + unsigned int mfcc_wrong_if; + int mfcc_expire; +}; + +struct mr_vif_iter { + struct seq_net_private p; + struct mr_table *mrt; + int ct; +}; + +struct mr_mfc_iter { + struct seq_net_private p; + struct mr_table *mrt; + struct list_head *cache; + spinlock_t *lock; +}; + +struct compat_sioc_sg_req { + struct in_addr src; + struct in_addr grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_vif_req { + vifi_t vifi; + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + +struct rta_mfc_stats { + __u64 mfcs_packets; + __u64 mfcs_bytes; + __u64 mfcs_wrong_if; +}; + +enum bbr_mode { + BBR_STARTUP = 0, + BBR_DRAIN = 1, + BBR_PROBE_BW = 2, + BBR_PROBE_RTT = 3, +}; + +enum bbr_ack_phase { + BBR_ACKS_INIT = 0, + BBR_ACKS_REFILLING = 1, + BBR_ACKS_PROBE_STARTING = 2, + BBR_ACKS_PROBE_FEEDBACK = 3, + BBR_ACKS_PROBE_STOPPING = 4, +}; + +enum bbr_pacing_gain_phase { + BBR_BW_PROBE_UP = 0, + BBR_BW_PROBE_DOWN = 1, + BBR_BW_PROBE_CRUISE = 2, + BBR_BW_PROBE_REFILL = 3, +}; + +enum { + INET_DIAG_NONE = 0, + INET_DIAG_MEMINFO = 1, + INET_DIAG_INFO = 2, + INET_DIAG_VEGASINFO = 3, + INET_DIAG_CONG = 4, + INET_DIAG_TOS = 5, + INET_DIAG_TCLASS = 6, + INET_DIAG_SKMEMINFO = 7, + INET_DIAG_SHUTDOWN = 8, + INET_DIAG_DCTCPINFO = 9, + INET_DIAG_PROTOCOL = 10, + INET_DIAG_SKV6ONLY = 11, + INET_DIAG_LOCALS = 12, + INET_DIAG_PEERS = 13, + INET_DIAG_PAD = 14, + INET_DIAG_MARK = 15, + INET_DIAG_BBRINFO = 16, + INET_DIAG_CLASS_ID = 17, + INET_DIAG_MD5SIG = 18, + INET_DIAG_ULP_INFO = 19, + INET_DIAG_SK_BPF_STORAGES = 20, + INET_DIAG_CGROUP_ID = 21, + INET_DIAG_SOCKOPT = 22, + __INET_DIAG_MAX = 23, +}; + +enum tcp_bbr_phase { + BBR_PHASE_INVALID = 0, + BBR_PHASE_STARTUP = 1, + BBR_PHASE_DRAIN = 2, + BBR_PHASE_PROBE_RTT = 3, + BBR_PHASE_PROBE_BW_UP = 4, + BBR_PHASE_PROBE_BW_DOWN = 5, + BBR_PHASE_PROBE_BW_CRUISE = 6, + BBR_PHASE_PROBE_BW_REFILL = 7, +}; + +struct bbr { + u32 min_rtt_us; + u32 min_rtt_stamp; + u32 probe_rtt_done_stamp; + u32 probe_rtt_min_us; + u32 probe_rtt_min_stamp; + u32 next_rtt_delivered; + u64 cycle_mstamp; + u32 mode : 2; + u32 prev_ca_state : 3; + u32 round_start : 1; + u32 ce_state : 1; + u32 bw_probe_up_rounds : 5; + u32 try_fast_path : 1; + u32 idle_restart : 1; + u32 probe_rtt_round_done : 1; + u32 init_cwnd : 7; + u32 unused_1 : 10; + u32 pacing_gain : 10; + u32 cwnd_gain : 10; + u32 full_bw_reached : 1; + u32 full_bw_cnt : 2; + u32 cycle_idx : 2; + u32 has_seen_rtt : 1; + u32 unused_2 : 6; + u32 prior_cwnd; + u32 full_bw; + u64 ack_epoch_mstamp; + u16 
extra_acked[2]; + u32 ack_epoch_acked : 20; + u32 extra_acked_win_rtts : 5; + u32 extra_acked_win_idx : 1; + u32 full_bw_now : 1; + u32 startup_ecn_rounds : 2; + u32 loss_in_cycle : 1; + u32 ecn_in_cycle : 1; + u32 unused_3 : 1; + u32 loss_round_delivered; + u32 undo_bw_lo; + u32 undo_inflight_lo; + u32 undo_inflight_hi; + u32 bw_latest; + u32 bw_lo; + u32 bw_hi[2]; + u32 inflight_latest; + u32 inflight_lo; + u32 inflight_hi; + u32 bw_probe_up_cnt; + u32 bw_probe_up_acks; + u32 probe_wait_us; + u32 prior_rcv_nxt; + u32 ecn_eligible : 1; + u32 ecn_alpha : 9; + u32 bw_probe_samples : 1; + u32 prev_probe_too_high : 1; + u32 stopped_risky_probe : 1; + u32 rounds_since_probe : 8; + u32 loss_round_start : 1; + u32 loss_in_round : 1; + u32 ecn_in_round : 1; + u32 ack_phase : 3; + u32 loss_events_in_round : 4; + u32 initialized : 1; + u32 alpha_last_delivered; + u32 alpha_last_delivered_ce; + u8 unused_4; + struct tcp_plb_state plb; +}; + +struct bbr_context { + u32 sample_bw; +}; + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +struct sigpool_entry { + struct crypto_ahash *hash; + const char *alg; + struct kref kref; + uint16_t needs_key : 1; + uint16_t reserved : 15; +}; + +struct scratches_to_free { + struct callback_head rcu; + unsigned int cnt; + void *scratches[0]; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_RX = 2, + TCP_BPF_TXRX = 3, + TCP_BPF_NUM_CFGS = 4, +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_rec; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + spinlock_t encrypt_compl_lock; + int async_notify; + u8 async_capable : 1; + unsigned long tx_bitmask; +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +enum unix_socket_lock_class { + U_LOCK_NORMAL = 0, + U_LOCK_SECOND = 1, + U_LOCK_DIAG = 2, +}; + +struct scm_stat { + atomic_t nr_fds; +}; + +struct unix_address; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct list_head link; + atomic_long_t inflight; + spinlock_t lock; + unsigned long gc_flags; + long : 64; + long : 64; + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + struct sk_buff *oob_skb; + long : 64; +}; + +struct unix_address { + refcount_t refcnt; + int len; + struct sockaddr_un name[0]; +}; + +struct bpf_iter__unix { + union { + struct bpf_iter_meta *meta; + }; + union { + struct unix_sock *unix_sk; + }; + uid_t uid; +}; + +struct bpf_unix_iter_state { + struct seq_net_private p; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +enum { + UNIX_DIAG_NAME = 0, 
+ UNIX_DIAG_VFS = 1, + UNIX_DIAG_PEER = 2, + UNIX_DIAG_ICONS = 3, + UNIX_DIAG_RQLEN = 4, + UNIX_DIAG_MEMINFO = 5, + UNIX_DIAG_SHUTDOWN = 6, + UNIX_DIAG_UID = 7, + __UNIX_DIAG_MAX = 8, +}; + +struct unix_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u16 pad; + __u32 udiag_states; + __u32 udiag_ino; + __u32 udiag_show; + __u32 udiag_cookie[2]; +}; + +struct unix_diag_vfs { + __u32 udiag_vfs_ino; + __u32 udiag_vfs_dev; +}; + +struct unix_diag_rqlen { + __u32 udiag_rqueue; + __u32 udiag_wqueue; +}; + +struct unix_diag_msg { + __u8 udiag_family; + __u8 udiag_type; + __u8 udiag_state; + __u8 pad; + __u32 udiag_ino; + __u32 udiag_cookie[2]; +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +struct ioam6_pernet_data { + struct mutex lock; + struct rhashtable namespaces; + struct rhashtable schemas; +}; + +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *, const struct sock *, + struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table *(*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, + struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, + struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, + const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, + struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_nh_release_dsts)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, + const struct in6_addr *, bool, bool, bool, bool); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, + int (*)(struct net *, struct sock *, struct sk_buff *)); + struct net_device *(*ipv6_dev_find)(struct net *, const struct in6_addr *, + struct net_device *); +}; + +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + unsigned long rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +struct compat_in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + u32 rtmsg_type; + u16 rtmsg_dst_len; + u16 rtmsg_src_len; + u32 rtmsg_metric; + u32 rtmsg_info; + u32 rtmsg_flags; + s32 rtmsg_ifindex; +}; + +struct inet6_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; +}; + +struct ac6_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 
2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + ICMP6_MIB_RATELIMITHOST = 6, + __ICMP6_MIB_MAX = 7, +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + __u64 dad_nonce; + unsigned long cstamp; + unsigned long tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head if_list_aux; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + u8 ifa_proto; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +struct hop_jumbo_hdr { + u8 nexthdr; + u8 hdrlen; + u8 tlv_type; + u8 tlv_len; + __be32 jumbo_payload_len; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ipcm6_cookie { + struct sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __u16 gso_size; + __s8 dontfrag; + struct ipv6_txoptions *opt; +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_NOT_OPTIMISTIC = 9, + IPV6_SADDR_RULE_MAX = 10, +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + IN6_ADDR_GEN_MODE_NONE = 1, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 
34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_RA_DEFRTR_METRIC = 52, + DEVCONF_IOAM6_ENABLED = 53, + DEVCONF_IOAM6_ID = 54, + DEVCONF_IOAM6_ID_WIDE = 55, + DEVCONF_NDISC_EVICT_NOCARRIER = 56, + DEVCONF_ACCEPT_UNTRACKED_NA = 57, + DEVCONF_ACCEPT_RA_MIN_LFT = 58, + DEVCONF_MAX = 59, +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + IFLA_INET6_RA_MTU = 9, + __IFLA_INET6_MAX = 10, +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + u8 fifo[6]; + } uc; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct inet6_ifaddr *ifa; + unsigned long scorebits[1]; + int scopedist; + int matchlen; +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + unsigned short prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + __u32 ifal_index; + __u32 ifal_seq; +}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, + const struct fib6_result *, + struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = -3, + 
RT6_NUD_FAIL_PROBE = -2, + RT6_NUD_FAIL_DO_RR = -1, + RT6_NUD_SUCCEED = 1, +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX = 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct route_info { + __u8 type; + __u8 length; + __u8 prefix_len; + __u8 reserved_l : 3; + __u8 route_pref : 2; + __u8 reserved_h : 3; + __be32 lifetime; + __u8 prefix[0]; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + char name[16]; + __u8 gw[16]; + char __data[0]; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + unsigned long stamp; + struct callback_head rcu; +}; + +struct __rt6_probe_work { + struct work_struct work; + struct in6_addr target; + struct net_device *dev; + netdevice_tracker dev_tracker; +}; + +struct arg_dev_net_ip { + struct net *net; + struct in6_addr *addr; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +typedef struct rt6_info *(*pol_lookup_t)(struct net *, struct fib6_table *, + struct flowi6 *, const struct sk_buff *, int); + +struct fib6_gc_args { + int timeout; + int more; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + unsigned long event; + }; +}; + +struct trace_event_data_offsets_fib6_table_lookup {}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + unsigned long now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + struct rt6_info **ret; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +enum fib6_walk_state { + FWS_S = 0, + FWS_L = 1, + FWS_R = 2, + FWS_C = 3, + FWS_U = 4, +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; 
+ unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_entry_notifier_info { + struct fib_notifier_info info; + struct fib6_info *rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct fib6_info *rt; + }; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + __be32 reachable_time; + __be32 retrans_timer; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + unsigned short nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + unsigned short nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, + const struct in6_addr *, const __be16); + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_qrv : 3; + __u8 mld2q_suppress : 1; + __u8 mld2q_resv2 : 4; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +struct igmp6_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 
196608, +}; + +struct frag_queue { + struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +struct tcp6_pseudohdr { + struct in6_addr saddr; + struct in6_addr daddr; + __be32 len; + __be32 protocol; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpre : 4; + __u32 cmpri : 4; + __u32 reserved : 4; + __u32 pad : 4; + __u32 reserved1 : 16; + union { + struct { + struct { + } __empty_addr; + struct in6_addr addr[0]; + }; + struct { + struct { + } __empty_data; + __u8 data[0]; + }; + } segments; +}; + +struct ioam6_hdr { + __u8 opt_type; + __u8 opt_len; + char : 8; + __u8 type; +}; + +struct ioam6_trace_hdr { + __be16 namespace_id; + char : 2; + __u8 overflow : 1; + __u8 nodelen : 5; + __u8 remlen : 7; + union { + __be32 type_be32; + struct { + __u32 bit7 : 1; + __u32 bit6 : 1; + __u32 bit5 : 1; + __u32 bit4 : 1; + __u32 bit3 : 1; + __u32 bit2 : 1; + __u32 bit1 : 1; + __u32 bit0 : 1; + __u32 bit15 : 1; + __u32 bit14 : 1; + __u32 bit13 : 1; + __u32 bit12 : 1; + __u32 bit11 : 1; + __u32 bit10 : 1; + __u32 bit9 : 1; + __u32 bit8 : 1; + __u32 bit23 : 1; + __u32 bit22 : 1; + __u32 bit21 : 1; + __u32 bit20 : 1; + __u32 bit19 : 1; + __u32 bit18 : 1; + __u32 bit17 : 1; + __u32 bit16 : 1; + } type; + }; + __u8 data[0]; +}; + +struct ioam6_schema; + +struct ioam6_namespace { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_schema __attribute__((btf_type_tag("rcu"))) * schema; + __be16 id; + __be32 data; + __be64 data_wide; +}; + +struct ioam6_schema { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_namespace __attribute__((btf_type_tag("rcu"))) * ns; + u32 id; + int len; + __be32 hdr; + u8 data[0]; +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +struct seg6_hmac_info { + struct rhash_head node; + struct callback_head rcu; + u32 hmackeyid; + char secret[64]; + u8 slen; + u8 alg_id; +}; + +enum { + IOAM6_ATTR_UNSPEC = 0, + IOAM6_ATTR_NS_ID = 1, + IOAM6_ATTR_NS_DATA = 2, + IOAM6_ATTR_NS_DATA_WIDE = 3, + IOAM6_ATTR_SC_ID = 4, + IOAM6_ATTR_SC_DATA = 5, + IOAM6_ATTR_SC_NONE = 6, + IOAM6_ATTR_PAD = 7, + __IOAM6_ATTR_MAX = 8, +}; + +enum { + IOAM6_CMD_UNSPEC = 0, + IOAM6_CMD_ADD_NAMESPACE = 1, + IOAM6_CMD_DEL_NAMESPACE = 2, + IOAM6_CMD_DUMP_NAMESPACES = 3, + IOAM6_CMD_ADD_SCHEMA = 4, + IOAM6_CMD_DEL_SCHEMA = 5, + IOAM6_CMD_DUMP_SCHEMAS = 6, + IOAM6_CMD_NS_SET_SCHEMA = 7, + __IOAM6_CMD_MAX = 8, +}; + +struct mfc6_cache_cmp_arg { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; +}; + +enum { + IP6MRA_CREPORT_UNSPEC = 0, + IP6MRA_CREPORT_MSGTYPE = 1, + IP6MRA_CREPORT_MIF_ID = 2, + IP6MRA_CREPORT_SRC_ADDR = 3, + IP6MRA_CREPORT_DST_ADDR = 4, + IP6MRA_CREPORT_PKT = 5, + __IP6MRA_CREPORT_MAX = 6, +}; + +struct mfc6_cache { + struct mr_mfc _c; + union { + struct { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; + }; + struct mfc6_cache_cmp_arg cmparg; + }; +}; + 
+struct mrt6msg { + __u8 im6_mbz; + __u8 im6_msgtype; + __u16 im6_mif; + __u32 im6_pad; + struct in6_addr im6_src; + struct in6_addr im6_dst; +}; + +struct ip6mr_result { + struct mr_table *mrt; +}; + +struct mif6ctl { + mifi_t mif6c_mifi; + unsigned char mif6c_flags; + unsigned char vifc_threshold; + __u16 mif6c_pifi; + unsigned int vifc_rate_limit; +}; + +typedef __u32 if_mask; + +struct if_set { + if_mask ifs_bits[8]; +}; + +struct mf6cctl { + struct sockaddr_in6 mf6cc_origin; + struct sockaddr_in6 mf6cc_mcastgrp; + mifi_t mf6cc_parent; + struct if_set mf6cc_ifset; +}; + +struct compat_sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_mif_req6 { + mifi_t mifi; + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only : 1; + u8 proxyarp_replied : 1; + u8 src_port_isolated : 1; + u8 br_netfilter_broute : 1; + u32 backup_nhid; +}; + +struct nf_bridge_frag_data; + +struct fib6_rule { + struct fib_rule common; + struct rt6key src; + struct rt6key dst; + dscp_t dscp; +}; + +enum { + SEG6_IPTUNNEL_UNSPEC = 0, + SEG6_IPTUNNEL_SRH = 1, + __SEG6_IPTUNNEL_MAX = 2, +}; + +enum { + SEG6_IPTUN_MODE_INLINE = 0, + SEG6_IPTUN_MODE_ENCAP = 1, + SEG6_IPTUN_MODE_L2ENCAP = 2, + SEG6_IPTUN_MODE_ENCAP_RED = 3, + SEG6_IPTUN_MODE_L2ENCAP_RED = 4, +}; + +struct seg6_iptunnel_encap { + int mode; + struct ipv6_sr_hdr srh[0]; +}; + +struct seg6_lwt { + struct dst_cache cache; + struct seg6_iptunnel_encap tuninfo[0]; +}; + +struct seg6_local_lwt; + +struct seg6_local_lwtunnel_ops { + int (*build_state)(struct seg6_local_lwt *, const void *, struct netlink_ext_ack *); + void (*destroy_state)(struct seg6_local_lwt *); +}; + +struct seg6_action_desc { + int action; + unsigned long attrs; + unsigned long optattrs; + int (*input)(struct sk_buff *, struct seg6_local_lwt *); + int static_headroom; + struct seg6_local_lwtunnel_ops slwt_ops; +}; + +enum seg6_end_dt_mode { + DT_INVALID_MODE = -22, + DT_LEGACY_MODE = 0, + DT_VRF_MODE = 1, +}; + +struct seg6_end_dt_info { + enum seg6_end_dt_mode mode; + struct net *net; + int vrf_ifindex; + int vrf_table; + u16 family; +}; + +struct seg6_flavors_info { + __u32 flv_ops; + __u8 lcblock_bits; + __u8 lcnode_func_bits; +}; + +struct pcpu_seg6_local_counters; + +struct seg6_local_lwt { + int action; + struct ipv6_sr_hdr *srh; + int table; + struct in_addr nh4; + struct in6_addr nh6; + int iif; + int oif; + struct bpf_lwt_prog bpf; + struct seg6_end_dt_info dt_info; + struct seg6_flavors_info flv_info; + struct pcpu_seg6_local_counters __attribute__((btf_type_tag("percpu"))) * pcpu_counters; + int headroom; + struct seg6_action_desc *desc; + unsigned long parsed_optattrs; +}; + +struct pcpu_seg6_local_counters { + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t errors; + struct u64_stats_sync syncp; +}; + +struct seg6_action_param { + int (*parse)(struct nlattr **, struct seg6_local_lwt *, struct netlink_ext_ack *); + int (*put)(struct sk_buff *, struct seg6_local_lwt *); + int (*cmp)(struct seg6_local_lwt *, struct seg6_local_lwt *); + void (*destroy)(struct seg6_local_lwt *); +}; + +enum { + SEG6_LOCAL_UNSPEC = 0, + SEG6_LOCAL_ACTION = 1, + SEG6_LOCAL_SRH = 2, + SEG6_LOCAL_TABLE = 3, + SEG6_LOCAL_NH4 = 4, + SEG6_LOCAL_NH6 = 5, + SEG6_LOCAL_IIF = 6, + SEG6_LOCAL_OIF = 7, + SEG6_LOCAL_BPF = 8, + 
SEG6_LOCAL_VRFTABLE = 9, + SEG6_LOCAL_COUNTERS = 10, + SEG6_LOCAL_FLAVORS = 11, + __SEG6_LOCAL_MAX = 12, +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +enum { + SEG6_LOCAL_FLV_OP_UNSPEC = 0, + SEG6_LOCAL_FLV_OP_PSP = 1, + SEG6_LOCAL_FLV_OP_USP = 2, + SEG6_LOCAL_FLV_OP_USD = 3, + SEG6_LOCAL_FLV_OP_NEXT_CSID = 4, + __SEG6_LOCAL_FLV_OP_MAX = 5, +}; + +enum seg6_local_flv_action { + SEG6_LOCAL_FLV_ACT_UNSPEC = 0, + SEG6_LOCAL_FLV_ACT_END = 1, + SEG6_LOCAL_FLV_ACT_PSP = 2, + SEG6_LOCAL_FLV_ACT_USP = 3, + SEG6_LOCAL_FLV_ACT_USD = 4, + __SEG6_LOCAL_FLV_ACT_MAX = 5, +}; + +enum seg6_local_pktinfo { + SEG6_LOCAL_PKTINFO_NOHDR = 0, + SEG6_LOCAL_PKTINFO_SL_ZERO = 1, + SEG6_LOCAL_PKTINFO_SL_ONE = 2, + SEG6_LOCAL_PKTINFO_SL_MORE = 3, + __SEG6_LOCAL_PKTINFO_MAX = 4, +}; + +enum l3mdev_type { + L3MDEV_TYPE_UNSPEC = 0, + L3MDEV_TYPE_VRF = 1, + __L3MDEV_TYPE_MAX = 2, +}; + +enum { + SEG6_LOCAL_BPF_PROG_UNSPEC = 0, + SEG6_LOCAL_BPF_PROG = 1, + SEG6_LOCAL_BPF_PROG_NAME = 2, + __SEG6_LOCAL_BPF_PROG_MAX = 3, +}; + +enum { + SEG6_LOCAL_CNT_UNSPEC = 0, + SEG6_LOCAL_CNT_PAD = 1, + SEG6_LOCAL_CNT_PACKETS = 2, + SEG6_LOCAL_CNT_BYTES = 3, + SEG6_LOCAL_CNT_ERRORS = 4, + __SEG6_LOCAL_CNT_MAX = 5, +}; + +enum { + SEG6_LOCAL_FLV_UNSPEC = 0, + SEG6_LOCAL_FLV_OPERATION = 1, + SEG6_LOCAL_FLV_LCBLOCK_BITS = 2, + SEG6_LOCAL_FLV_LCNODE_FN_BITS = 3, + __SEG6_LOCAL_FLV_MAX = 4, +}; + +struct seg6_local_counters { + __u64 packets; + __u64 bytes; + __u64 errors; +}; + +struct seg6_hmac_algo { + u8 alg_id; + char name[64]; + struct crypto_shash *__attribute__((btf_type_tag("percpu"))) * tfms; + struct shash_desc *__attribute__((btf_type_tag("percpu"))) * shashs; +}; + +struct sr6_tlv_hmac { + struct sr6_tlv tlvhdr; + __u16 reserved; + __be32 hmackeyid; + __u8 hmac[32]; +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +enum nl80211_chan_width { + NL80211_CHAN_WIDTH_20_NOHT = 0, + NL80211_CHAN_WIDTH_20 = 1, + NL80211_CHAN_WIDTH_40 = 2, + NL80211_CHAN_WIDTH_80 = 3, + NL80211_CHAN_WIDTH_80P80 = 4, + NL80211_CHAN_WIDTH_160 = 5, + NL80211_CHAN_WIDTH_5 = 6, + NL80211_CHAN_WIDTH_10 = 7, + NL80211_CHAN_WIDTH_1 = 8, + NL80211_CHAN_WIDTH_2 = 9, + NL80211_CHAN_WIDTH_4 = 10, + NL80211_CHAN_WIDTH_8 = 11, + NL80211_CHAN_WIDTH_16 = 12, + NL80211_CHAN_WIDTH_320 = 13, +}; + +enum ieee80211_edmg_bw_config { + IEEE80211_EDMG_BW_CONFIG_4 = 4, + IEEE80211_EDMG_BW_CONFIG_5 = 5, + IEEE80211_EDMG_BW_CONFIG_6 = 6, + IEEE80211_EDMG_BW_CONFIG_7 = 7, + IEEE80211_EDMG_BW_CONFIG_8 = 8, + IEEE80211_EDMG_BW_CONFIG_9 = 9, + IEEE80211_EDMG_BW_CONFIG_10 = 10, + IEEE80211_EDMG_BW_CONFIG_11 = 11, + IEEE80211_EDMG_BW_CONFIG_12 = 12, + IEEE80211_EDMG_BW_CONFIG_13 = 13, + IEEE80211_EDMG_BW_CONFIG_14 = 14, + IEEE80211_EDMG_BW_CONFIG_15 = 15, +}; + +struct ieee80211_edmg { + u8 channels; + enum ieee80211_edmg_bw_config bw_config; +}; + +struct ieee80211_channel; + +struct cfg80211_chan_def { + struct ieee80211_channel *chan; + enum nl80211_chan_width width; + u32 center_freq1; + u32 center_freq2; + struct ieee80211_edmg edmg; + u16 freq1_offset; +}; + +struct ieee80211_mcs_info { + u8 rx_mask[10]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +}; + +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + struct ieee80211_mcs_info mcs; + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__((packed)); + +struct key_params; + +struct cfg80211_ibss_params { + const u8 *ssid; + const u8 *bssid; + struct cfg80211_chan_def chandef; + const u8 *ie; 
+ u8 ssid_len; + u8 ie_len; + u16 beacon_interval; + u32 basic_rates; + bool channel_fixed; + bool privacy; + bool control_port; + bool control_port_over_nl80211; + bool userspace_handles_dfs; + int mcast_rate[6]; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct key_params *wep_keys; + int wep_tx_key; +}; + +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM = 0, + NL80211_AUTHTYPE_SHARED_KEY = 1, + NL80211_AUTHTYPE_FT = 2, + NL80211_AUTHTYPE_NETWORK_EAP = 3, + NL80211_AUTHTYPE_SAE = 4, + NL80211_AUTHTYPE_FILS_SK = 5, + NL80211_AUTHTYPE_FILS_SK_PFS = 6, + NL80211_AUTHTYPE_FILS_PK = 7, + __NL80211_AUTHTYPE_NUM = 8, + NL80211_AUTHTYPE_MAX = 7, + NL80211_AUTHTYPE_AUTOMATIC = 8, +}; + +enum nl80211_mfp { + NL80211_MFP_NO = 0, + NL80211_MFP_REQUIRED = 1, + NL80211_MFP_OPTIONAL = 2, +}; + +enum nl80211_sae_pwe_mechanism { + NL80211_SAE_PWE_UNSPECIFIED = 0, + NL80211_SAE_PWE_HUNT_AND_PECK = 1, + NL80211_SAE_PWE_HASH_TO_ELEMENT = 2, + NL80211_SAE_PWE_BOTH = 3, +}; + +struct cfg80211_crypto_settings { + u32 wpa_versions; + u32 cipher_group; + int n_ciphers_pairwise; + u32 ciphers_pairwise[5]; + int n_akm_suites; + u32 akm_suites[10]; + bool control_port; + __be16 control_port_ethertype; + bool control_port_no_encrypt; + bool control_port_over_nl80211; + bool control_port_no_preauth; + const u8 *psk; + const u8 *sae_pwd; + u8 sae_pwd_len; + enum nl80211_sae_pwe_mechanism sae_pwe; +}; + +struct ieee80211_vht_mcs_info { + __le16 rx_mcs_map; + __le16 rx_highest; + __le16 tx_mcs_map; + __le16 tx_highest; +}; + +struct ieee80211_vht_cap { + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +}; + +enum nl80211_bss_select_attr { + __NL80211_BSS_SELECT_ATTR_INVALID = 0, + NL80211_BSS_SELECT_ATTR_RSSI = 1, + NL80211_BSS_SELECT_ATTR_BAND_PREF = 2, + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 3, + __NL80211_BSS_SELECT_ATTR_AFTER_LAST = 4, + NL80211_BSS_SELECT_ATTR_MAX = 3, +}; + +enum nl80211_band { + NL80211_BAND_2GHZ = 0, + NL80211_BAND_5GHZ = 1, + NL80211_BAND_60GHZ = 2, + NL80211_BAND_6GHZ = 3, + NL80211_BAND_S1GHZ = 4, + NL80211_BAND_LC = 5, + NUM_NL80211_BANDS = 6, +}; + +struct cfg80211_bss_select_adjust { + enum nl80211_band band; + s8 delta; +}; + +struct cfg80211_bss_selection { + enum nl80211_bss_select_attr behaviour; + union { + enum nl80211_band band_pref; + struct cfg80211_bss_select_adjust adjust; + } param; +}; + +struct cfg80211_connect_params { + struct ieee80211_channel *channel; + struct ieee80211_channel *channel_hint; + const u8 *bssid; + const u8 *bssid_hint; + const u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + const u8 *ie; + size_t ie_len; + bool privacy; + enum nl80211_mfp mfp; + struct cfg80211_crypto_settings crypto; + const u8 *key; + u8 key_len; + u8 key_idx; + u32 flags; + int bg_scan_period; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + bool pbss; + struct cfg80211_bss_selection bss_select; + const u8 *prev_bssid; + const u8 *fils_erp_username; + size_t fils_erp_username_len; + const u8 *fils_erp_realm; + size_t fils_erp_realm_len; + u16 fils_erp_next_seq_num; + const u8 *fils_erp_rrk; + size_t fils_erp_rrk_len; + bool want_1x; + struct ieee80211_edmg edmg; +}; + +struct cfg80211_cached_keys; + +struct cfg80211_internal_bss; + +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED = 0, + NL80211_IFTYPE_ADHOC = 1, + NL80211_IFTYPE_STATION = 2, + NL80211_IFTYPE_AP = 3, + NL80211_IFTYPE_AP_VLAN = 4, + 
NL80211_IFTYPE_WDS = 5, + NL80211_IFTYPE_MONITOR = 6, + NL80211_IFTYPE_MESH_POINT = 7, + NL80211_IFTYPE_P2P_CLIENT = 8, + NL80211_IFTYPE_P2P_GO = 9, + NL80211_IFTYPE_P2P_DEVICE = 10, + NL80211_IFTYPE_OCB = 11, + NL80211_IFTYPE_NAN = 12, + NUM_NL80211_IFTYPES = 13, + NL80211_IFTYPE_MAX = 12, +}; + +struct cfg80211_conn; + +enum ieee80211_bss_type { + IEEE80211_BSS_TYPE_ESS = 0, + IEEE80211_BSS_TYPE_PBSS = 1, + IEEE80211_BSS_TYPE_IBSS = 2, + IEEE80211_BSS_TYPE_MBSS = 3, + IEEE80211_BSS_TYPE_ANY = 4, +}; + +struct wiphy; + +struct wiphy_work; + +typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); + +struct wiphy_work { + struct list_head entry; + wiphy_work_func_t func; +}; + +struct cfg80211_cqm_config; + +struct wireless_dev { + struct wiphy *wiphy; + enum nl80211_iftype iftype; + struct list_head list; + struct net_device *netdev; + u32 identifier; + struct list_head mgmt_registrations; + u8 mgmt_registrations_need_update : 1; + bool use_4addr; + bool is_running; + bool registered; + bool registering; + short : 0; + u8 address[6]; + struct cfg80211_conn *conn; + struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; + u32 conn_owner_nlportid; + struct work_struct disconnect_wk; + u8 disconnect_bssid[6]; + struct list_head event_list; + spinlock_t event_lock; + u8 connected : 1; + bool ps; + int ps_timeout; + u32 ap_unexpected_nlportid; + u32 owner_nlportid; + bool nl_owner_dead; + bool cac_started; + unsigned long cac_start_time; + unsigned int cac_time_ms; + struct { + struct cfg80211_ibss_params ibss; + struct cfg80211_connect_params connect; + struct cfg80211_cached_keys *keys; + const u8 *ie; + size_t ie_len; + u8 bssid[6]; + u8 prev_bssid[6]; + u8 ssid[32]; + s8 default_key; + s8 default_mgmt_key; + bool prev_bssid_valid; + } wext; + struct wiphy_work cqm_rssi_work; + struct cfg80211_cqm_config __attribute__((btf_type_tag("rcu"))) * cqm_config; + struct list_head pmsr_list; + spinlock_t pmsr_lock; + struct work_struct pmsr_free_wk; + unsigned long unprot_beacon_reported; + union { + struct { + u8 connected_addr[6]; + u8 ssid[32]; + u8 ssid_len; + long : 0; + } client; + struct { + int beacon_interval; + struct cfg80211_chan_def preset_chandef; + struct cfg80211_chan_def chandef; + u8 id[32]; + u8 id_len; + u8 id_up_len; + } mesh; + struct { + struct cfg80211_chan_def preset_chandef; + u8 ssid[32]; + u8 ssid_len; + } ap; + struct { + struct cfg80211_internal_bss *current_bss; + struct cfg80211_chan_def chandef; + int beacon_interval; + u8 ssid[32]; + u8 ssid_len; + } ibss; + struct { + struct cfg80211_chan_def chandef; + } ocb; + } u; + struct { + u8 addr[6]; + union { + struct { + unsigned int beacon_interval; + struct cfg80211_chan_def chandef; + } ap; + struct { + struct cfg80211_internal_bss *current_bss; + } client; + }; + } links[15]; + u16 valid_links; +}; + +enum cfg80211_signal_type { + CFG80211_SIGNAL_TYPE_NONE = 0, + CFG80211_SIGNAL_TYPE_MBM = 1, + CFG80211_SIGNAL_TYPE_UNSPEC = 2, +}; + +struct rfkill; + +struct mac_address; + +struct ieee80211_txrx_stypes; + +struct ieee80211_iface_combination; + +struct wiphy_iftype_akm_suites; + +struct wiphy_wowlan_support; + +struct cfg80211_wowlan; + +struct wiphy_iftype_ext_capab; + +struct ieee80211_supported_band; + +struct regulatory_request; + +struct ieee80211_regdomain; + +struct iw_handler_def; + +struct wiphy_coalesce_support; + +struct wiphy_vendor_command; + +struct nl80211_vendor_cmd_info; + +struct cfg80211_pmsr_capabilities; + +struct cfg80211_sar_capa; + +struct wiphy { + 
struct mutex mtx; + u8 perm_addr[6]; + u8 addr_mask[6]; + struct mac_address *addresses; + const struct ieee80211_txrx_stypes *mgmt_stypes; + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; + u16 software_iftypes; + u16 n_addresses; + u16 interface_modes; + u16 max_acl_mac_addrs; + u32 flags; + u32 regulatory_flags; + u32 features; + u8 ext_features[9]; + u32 ap_sme_capa; + enum cfg80211_signal_type signal_type; + int bss_priv_size; + u8 max_scan_ssids; + u8 max_sched_scan_reqs; + u8 max_sched_scan_ssids; + u8 max_match_sets; + u16 max_scan_ie_len; + u16 max_sched_scan_ie_len; + u32 max_sched_scan_plans; + u32 max_sched_scan_plan_interval; + u32 max_sched_scan_plan_iterations; + int n_cipher_suites; + const u32 *cipher_suites; + int n_akm_suites; + const u32 *akm_suites; + const struct wiphy_iftype_akm_suites *iftype_akm_suites; + unsigned int num_iftype_akm_suites; + u8 retry_short; + u8 retry_long; + u32 frag_threshold; + u32 rts_threshold; + u8 coverage_class; + char fw_version[32]; + u32 hw_version; + const struct wiphy_wowlan_support *wowlan; + struct cfg80211_wowlan *wowlan_config; + u16 max_remain_on_channel_duration; + u8 max_num_pmkids; + u32 available_antennas_tx; + u32 available_antennas_rx; + u32 probe_resp_offload; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; + const struct wiphy_iftype_ext_capab *iftype_ext_capab; + unsigned int num_iftype_ext_capab; + const void *privid; + struct ieee80211_supported_band *bands[6]; + void (*reg_notifier)(struct wiphy *, struct regulatory_request *); + const struct ieee80211_regdomain __attribute__((btf_type_tag("rcu"))) * regd; + struct device dev; + bool registered; + struct dentry *debugfsdir; + const struct ieee80211_ht_cap *ht_capa_mod_mask; + const struct ieee80211_vht_cap *vht_capa_mod_mask; + struct list_head wdev_list; + possible_net_t _net; + const struct iw_handler_def *wext; + const struct wiphy_coalesce_support *coalesce; + const struct wiphy_vendor_command *vendor_commands; + const struct nl80211_vendor_cmd_info *vendor_events; + int n_vendor_commands; + int n_vendor_events; + u16 max_ap_assoc_sta; + u8 max_num_csa_counters; + u32 bss_select_support; + u8 nan_supported_bands; + u32 txq_limit; + u32 txq_memory_limit; + u32 txq_quantum; + unsigned long tx_queue_len; + u8 support_mbssid : 1; + u8 support_only_he_mbssid : 1; + const struct cfg80211_pmsr_capabilities *pmsr_capa; + struct { + u64 peer; + u64 vif; + u8 max_retry; + } tid_config_support; + u8 max_data_retry_count; + const struct cfg80211_sar_capa *sar_capa; + struct rfkill *rfkill; + u8 mbssid_max_interfaces; + u8 ema_max_profile_periodicity; + u16 max_num_akm_suites; + u16 hw_timestamp_max_peers; + long : 0; + char priv[0]; +}; + +struct mac_address { + u8 addr[6]; +}; + +struct ieee80211_txrx_stypes { + u16 tx; + u16 rx; +}; + +struct ieee80211_iface_limit; + +struct ieee80211_iface_combination { + const struct ieee80211_iface_limit *limits; + u32 num_different_channels; + u16 max_interfaces; + u8 n_limits; + bool beacon_int_infra_match; + u8 radar_detect_widths; + u8 radar_detect_regions; + u32 beacon_int_min_gcd; +}; + +struct ieee80211_iface_limit { + u16 max; + u16 types; +}; + +struct wiphy_iftype_akm_suites { + u16 iftypes_mask; + const u32 *akm_suites; + int n_akm_suites; +}; + +struct wiphy_wowlan_tcp_support; + +struct wiphy_wowlan_support { + u32 flags; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; + int 
max_nd_match_sets; + const struct wiphy_wowlan_tcp_support *tcp; +}; + +struct nl80211_wowlan_tcp_data_token_feature; + +struct wiphy_wowlan_tcp_support { + const struct nl80211_wowlan_tcp_data_token_feature *tok; + u32 data_payload_max; + u32 data_interval_max; + u32 wake_payload_max; + bool seq; +}; + +struct nl80211_wowlan_tcp_data_token_feature { + __u32 min_len; + __u32 max_len; + __u32 bufsize; +}; + +struct cfg80211_pkt_pattern; + +struct cfg80211_wowlan_tcp; + +struct cfg80211_sched_scan_request; + +struct cfg80211_wowlan { + bool any; + bool disconnect; + bool magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + struct cfg80211_pkt_pattern *patterns; + struct cfg80211_wowlan_tcp *tcp; + int n_patterns; + struct cfg80211_sched_scan_request *nd_config; +}; + +struct cfg80211_pkt_pattern { + const u8 *mask; + const u8 *pattern; + int pattern_len; + int pkt_offset; +}; + +struct nl80211_wowlan_tcp_data_seq { + __u32 start; + __u32 offset; + __u32 len; +}; + +struct nl80211_wowlan_tcp_data_token { + __u32 offset; + __u32 len; + __u8 token_stream[0]; +}; + +struct cfg80211_wowlan_tcp { + struct socket *sock; + __be32 src; + __be32 dst; + u16 src_port; + u16 dst_port; + u8 dst_mac[6]; + int payload_len; + const u8 *payload; + struct nl80211_wowlan_tcp_data_seq payload_seq; + u32 data_interval; + u32 wake_len; + const u8 *wake_data; + const u8 *wake_mask; + u32 tokens_size; + struct nl80211_wowlan_tcp_data_token payload_tok; +}; + +struct cfg80211_ssid; + +struct cfg80211_match_set; + +struct cfg80211_sched_scan_plan; + +struct cfg80211_sched_scan_request { + u64 reqid; + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + const u8 *ie; + size_t ie_len; + u32 flags; + struct cfg80211_match_set *match_sets; + int n_match_sets; + s32 min_rssi_thold; + u32 delay; + struct cfg80211_sched_scan_plan *scan_plans; + int n_scan_plans; + u8 mac_addr[6]; + u8 mac_addr_mask[6]; + bool relative_rssi_set; + s8 relative_rssi; + struct cfg80211_bss_select_adjust rssi_adjust; + struct wiphy *wiphy; + struct net_device *dev; + unsigned long scan_start; + bool report_results; + struct callback_head callback_head; + u32 owner_nlportid; + bool nl_owner_dead; + struct list_head list; + struct ieee80211_channel *channels[0]; +}; + +struct cfg80211_ssid { + u8 ssid[32]; + u8 ssid_len; +}; + +struct cfg80211_match_set { + struct cfg80211_ssid ssid; + u8 bssid[6]; + s32 rssi_thold; + s32 per_band_rssi_thold[6]; +}; + +struct cfg80211_sched_scan_plan { + u32 interval; + u32 iterations; +}; + +enum nl80211_dfs_state { + NL80211_DFS_USABLE = 0, + NL80211_DFS_UNAVAILABLE = 1, + NL80211_DFS_AVAILABLE = 2, +}; + +struct ieee80211_channel { + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + u16 hw_value; + u32 flags; + int max_antenna_gain; + int max_power; + int max_reg_power; + bool beacon_found; + u32 orig_flags; + int orig_mag; + int orig_mpwr; + enum nl80211_dfs_state dfs_state; + unsigned long dfs_state_entered; + unsigned int dfs_cac_ms; + s8 psd; +}; + +struct wiphy_iftype_ext_capab { + enum nl80211_iftype iftype; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; + u16 eml_capabilities; + u16 mld_capa_and_ops; +}; + +struct ieee80211_sta_ht_cap { + u16 cap; + bool ht_supported; + u8 ampdu_factor; + u8 ampdu_density; + struct ieee80211_mcs_info mcs; + short : 0; +} __attribute__((packed)); + +struct ieee80211_sta_vht_cap { + bool vht_supported; + u32 cap; + struct 
ieee80211_vht_mcs_info vht_mcs; +}; + +struct ieee80211_sta_s1g_cap { + bool s1g; + u8 cap[10]; + u8 nss_mcs[5]; +}; + +struct ieee80211_rate; + +struct ieee80211_sband_iftype_data; + +struct ieee80211_supported_band { + struct ieee80211_channel *channels; + struct ieee80211_rate *bitrates; + enum nl80211_band band; + int n_channels; + int n_bitrates; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_s1g_cap s1g_cap; + struct ieee80211_edmg edmg_cap; + u16 n_iftype_data; + const struct ieee80211_sband_iftype_data *iftype_data; +}; + +struct ieee80211_rate { + u32 flags; + u16 bitrate; + u16 hw_value; + u16 hw_value_short; +}; + +struct ieee80211_he_cap_elem { + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; +}; + +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +}; + +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[25]; +} __attribute__((packed)); + +struct ieee80211_he_6ghz_capa { + __le16 capa; +}; + +struct ieee80211_eht_cap_elem_fixed { + u8 mac_cap_info[2]; + u8 phy_cap_info[9]; +}; + +struct ieee80211_eht_mcs_nss_supp_20mhz_only { + union { + struct { + u8 rx_tx_mcs7_max_nss; + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; + }; + u8 rx_tx_max_nss[4]; + }; +}; + +struct ieee80211_eht_mcs_nss_supp_bw { + union { + struct { + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; + }; + u8 rx_tx_max_nss[3]; + }; +}; + +struct ieee80211_eht_mcs_nss_supp { + union { + struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz; + struct { + struct ieee80211_eht_mcs_nss_supp_bw _80; + struct ieee80211_eht_mcs_nss_supp_bw _160; + struct ieee80211_eht_mcs_nss_supp_bw _320; + } bw; + }; +}; + +struct ieee80211_sta_eht_cap { + bool has_eht; + struct ieee80211_eht_cap_elem_fixed eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp; + u8 eht_ppe_thres[32]; +}; + +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; + struct { + const u8 *data; + unsigned int len; + } vendor_elems; +} __attribute__((packed)); + +enum nl80211_reg_initiator { + NL80211_REGDOM_SET_BY_CORE = 0, + NL80211_REGDOM_SET_BY_USER = 1, + NL80211_REGDOM_SET_BY_DRIVER = 2, + NL80211_REGDOM_SET_BY_COUNTRY_IE = 3, +}; + +enum nl80211_user_reg_hint_type { + NL80211_USER_REG_HINT_USER = 0, + NL80211_USER_REG_HINT_CELL_BASE = 1, + NL80211_USER_REG_HINT_INDOOR = 2, +}; + +enum nl80211_dfs_regions { + NL80211_DFS_UNSET = 0, + NL80211_DFS_FCC = 1, + NL80211_DFS_ETSI = 2, + NL80211_DFS_JP = 3, +}; + +enum environment_cap { + ENVIRON_ANY = 0, + ENVIRON_INDOOR = 1, + ENVIRON_OUTDOOR = 2, +}; + +struct regulatory_request { + struct callback_head callback_head; + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + bool intersect; + bool processed; + enum environment_cap country_ie_env; + struct list_head list; +}; + +struct ieee80211_freq_range { + u32 start_freq_khz; + u32 end_freq_khz; + u32 max_bandwidth_khz; +}; + +struct ieee80211_power_rule { + u32 max_antenna_gain; + u32 max_eirp; +}; + +struct ieee80211_wmm_ac { + u16 cw_min; + u16 cw_max; + u16 cot; + u8 aifsn; +}; + 
+struct ieee80211_wmm_rule { + struct ieee80211_wmm_ac client[4]; + struct ieee80211_wmm_ac ap[4]; +}; + +struct ieee80211_reg_rule { + struct ieee80211_freq_range freq_range; + struct ieee80211_power_rule power_rule; + struct ieee80211_wmm_rule wmm_rule; + u32 flags; + u32 dfs_cac_ms; + bool has_wmm; + s8 psd; +}; + +struct ieee80211_regdomain { + struct callback_head callback_head; + u32 n_reg_rules; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + struct ieee80211_reg_rule reg_rules[0]; +}; + +struct iw_request_info; + +union iwreq_data; + +typedef int (*iw_handler)(struct net_device *, struct iw_request_info *, + union iwreq_data *, char *); + +struct iw_statistics; + +struct iw_handler_def { + const iw_handler *standard; + __u16 num_standard; + struct iw_statistics *(*get_wireless_stats)(struct net_device *); +}; + +struct iw_request_info { + __u16 cmd; + __u16 flags; +}; + +struct iw_point { + void __attribute__((btf_type_tag("user"))) * pointer; + __u16 length; + __u16 flags; +}; + +struct iw_param { + __s32 value; + __u8 fixed; + __u8 disabled; + __u16 flags; +}; + +struct iw_freq { + __s32 m; + __s16 e; + __u8 i; + __u8 flags; +}; + +struct iw_quality { + __u8 qual; + __u8 level; + __u8 noise; + __u8 updated; +}; + +union iwreq_data { + char name[16]; + struct iw_point essid; + struct iw_param nwid; + struct iw_freq freq; + struct iw_param sens; + struct iw_param bitrate; + struct iw_param txpower; + struct iw_param rts; + struct iw_param frag; + __u32 mode; + struct iw_param retry; + struct iw_point encoding; + struct iw_param power; + struct iw_quality qual; + struct sockaddr ap_addr; + struct sockaddr addr; + struct iw_param param; + struct iw_point data; +}; + +struct iw_discarded { + __u32 nwid; + __u32 code; + __u32 fragment; + __u32 retries; + __u32 misc; +}; + +struct iw_missed { + __u32 beacon; +}; + +struct iw_statistics { + __u16 status; + struct iw_quality qual; + struct iw_discarded discard; + struct iw_missed miss; +}; + +struct wiphy_coalesce_support { + int n_rules; + int max_delay; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; +}; + +struct nl80211_vendor_cmd_info { + __u32 vendor_id; + __u32 subcmd; +}; + +struct wiphy_vendor_command { + struct nl80211_vendor_cmd_info info; + u32 flags; + int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); + int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, + const void *, int, unsigned long *); + const struct nla_policy *policy; + unsigned int maxattr; +}; + +struct cfg80211_pmsr_capabilities { + unsigned int max_peers; + u8 report_ap_tsf : 1; + u8 randomize_mac_addr : 1; + struct { + u32 preambles; + u32 bandwidths; + s8 max_bursts_exponent; + u8 max_ftms_per_burst; + u8 supported : 1; + u8 asap : 1; + u8 non_asap : 1; + u8 request_lci : 1; + u8 request_civicloc : 1; + u8 trigger_based : 1; + u8 non_trigger_based : 1; + } ftm; +}; + +enum nl80211_sar_type { + NL80211_SAR_TYPE_POWER = 0, + NUM_NL80211_SAR_TYPE = 1, +}; + +struct cfg80211_sar_freq_ranges; + +struct cfg80211_sar_capa { + enum nl80211_sar_type type; + u32 num_freq_ranges; + const struct cfg80211_sar_freq_ranges *freq_ranges; +}; + +struct cfg80211_sar_freq_ranges { + u32 start_freq; + u32 end_freq; +}; + +enum nl80211_key_mode { + NL80211_KEY_RX_TX = 0, + NL80211_KEY_NO_TX = 1, + NL80211_KEY_SET_TX = 2, +}; + +struct key_params { + const u8 *key; + const u8 *seq; + int key_len; + int seq_len; + u16 vlan_id; + u32 cipher; + enum nl80211_key_mode mode; +}; + +struct 
iw_ioctl_description { + __u8 header_type; + __u8 token_type; + __u16 token_size; + __u16 min_tokens; + __u16 max_tokens; + __u32 flags; +}; + +enum wiphy_flags { + WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = 1, + WIPHY_FLAG_SUPPORTS_MLO = 2, + WIPHY_FLAG_SPLIT_SCAN_6GHZ = 4, + WIPHY_FLAG_NETNS_OK = 8, + WIPHY_FLAG_PS_ON_BY_DEFAULT = 16, + WIPHY_FLAG_4ADDR_AP = 32, + WIPHY_FLAG_4ADDR_STATION = 64, + WIPHY_FLAG_CONTROL_PORT_PROTOCOL = 128, + WIPHY_FLAG_IBSS_RSN = 256, + WIPHY_FLAG_MESH_AUTH = 1024, + WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = 2048, + WIPHY_FLAG_SUPPORTS_FW_ROAM = 8192, + WIPHY_FLAG_AP_UAPSD = 16384, + WIPHY_FLAG_SUPPORTS_TDLS = 32768, + WIPHY_FLAG_TDLS_EXTERNAL_SETUP = 65536, + WIPHY_FLAG_HAVE_AP_SME = 131072, + WIPHY_FLAG_REPORTS_OBSS = 262144, + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = 524288, + WIPHY_FLAG_OFFCHAN_TX = 1048576, + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = 2097152, + WIPHY_FLAG_SUPPORTS_5_10_MHZ = 4194304, + WIPHY_FLAG_HAS_CHANNEL_SWITCH = 8388608, + WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = 16777216, + WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = 33554432, +}; + +struct compat_iw_point { + compat_caddr_t pointer; + __u16 length; + __u16 flags; +}; + +struct iwreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union iwreq_data u; +}; + +typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, + struct iw_request_info *, iw_handler); + +struct iw_event { + __u16 len; + __u16 cmd; + union iwreq_data u; +}; + +struct __compat_iw_event { + __u16 len; + __u16 cmd; + union { + compat_caddr_t pointer; + struct { + struct { + } __empty_ptr_bytes; + __u8 ptr_bytes[0]; + }; + }; +}; + +struct iw_encode_ext { + __u32 ext_flags; + __u8 tx_seq[8]; + __u8 rx_seq[8]; + struct sockaddr addr; + __u16 alg; + __u16 key_len; + __u8 key[0]; +}; + +enum p9_trans_status { + Connected = 0, + BeginDisconnect = 1, + Disconnected = 2, + Hung = 3, +}; + +struct p9_client; + +struct p9_req_t; + +struct p9_trans_module { + struct list_head list; + char *name; + int maxsize; + bool pooled_rbuffers; + int def; + struct module *owner; + int (*create)(struct p9_client *, const char *, char *); + void (*close)(struct p9_client *); + int (*request)(struct p9_client *, struct p9_req_t *); + int (*cancel)(struct p9_client *, struct p9_req_t *); + int (*cancelled)(struct p9_client *, struct p9_req_t *); + int (*zc_request)(struct p9_client *, struct p9_req_t *, struct iov_iter *, + struct iov_iter *, int, int, int); + int (*show_options)(struct seq_file *, struct p9_client *); +}; + +struct p9_client { + spinlock_t lock; + unsigned int msize; + unsigned char proto_version; + struct p9_trans_module *trans_mod; + enum p9_trans_status status; + void *trans; + struct kmem_cache *fcall_cache; + union { + struct { + int rfd; + int wfd; + } fd; + struct { + u16 port; + bool privport; + } tcp; + } trans_opts; + struct idr fids; + struct idr reqs; + char name[65]; +}; + +struct p9_fcall { + u32 size; + u8 id; + u16 tag; + size_t offset; + size_t capacity; + struct kmem_cache *cache; + u8 *sdata; + bool zc; +}; + +struct p9_req_t { + int status; + int t_err; + refcount_t refcount; + wait_queue_head_t wq; + struct p9_fcall tc; + struct p9_fcall rc; + struct list_head req_list; +}; + +typedef s8 int8_t; + +typedef void (*btf_trace_9p_client_req)(void *, struct p9_client *, int8_t, int); + +typedef void (*btf_trace_9p_client_res)(void *, struct p9_client *, int8_t, int, int); + +typedef void (*btf_trace_9p_protocol_dump)(void *, struct p9_client *, struct p9_fcall *); + +struct p9_fid; + +typedef void 
(*btf_trace_9p_fid_ref)(void *, struct p9_fid *, __u8); + +struct p9_qid { + u8 type; + u32 version; + u64 path; +}; + +struct p9_fid { + struct p9_client *clnt; + u32 fid; + refcount_t count; + int mode; + struct p9_qid qid; + u32 iounit; + kuid_t uid; + void *rdir; + struct hlist_node dlist; + struct hlist_node ilist; +}; + +enum p9_proto_versions { + p9_proto_legacy = 0, + p9_proto_2000u = 1, + p9_proto_2000L = 2, +}; + +enum p9_fid_reftype { + P9_FID_REF_CREATE = 0, + P9_FID_REF_GET = 1, + P9_FID_REF_PUT = 2, + P9_FID_REF_DESTROY = 3, +} __attribute__((mode(byte))); + +enum p9_msg_t { + P9_TLERROR = 6, + P9_RLERROR = 7, + P9_TSTATFS = 8, + P9_RSTATFS = 9, + P9_TLOPEN = 12, + P9_RLOPEN = 13, + P9_TLCREATE = 14, + P9_RLCREATE = 15, + P9_TSYMLINK = 16, + P9_RSYMLINK = 17, + P9_TMKNOD = 18, + P9_RMKNOD = 19, + P9_TRENAME = 20, + P9_RRENAME = 21, + P9_TREADLINK = 22, + P9_RREADLINK = 23, + P9_TGETATTR = 24, + P9_RGETATTR = 25, + P9_TSETATTR = 26, + P9_RSETATTR = 27, + P9_TXATTRWALK = 30, + P9_RXATTRWALK = 31, + P9_TXATTRCREATE = 32, + P9_RXATTRCREATE = 33, + P9_TREADDIR = 40, + P9_RREADDIR = 41, + P9_TFSYNC = 50, + P9_RFSYNC = 51, + P9_TLOCK = 52, + P9_RLOCK = 53, + P9_TGETLOCK = 54, + P9_RGETLOCK = 55, + P9_TLINK = 70, + P9_RLINK = 71, + P9_TMKDIR = 72, + P9_RMKDIR = 73, + P9_TRENAMEAT = 74, + P9_RRENAMEAT = 75, + P9_TUNLINKAT = 76, + P9_RUNLINKAT = 77, + P9_TVERSION = 100, + P9_RVERSION = 101, + P9_TAUTH = 102, + P9_RAUTH = 103, + P9_TATTACH = 104, + P9_RATTACH = 105, + P9_TERROR = 106, + P9_RERROR = 107, + P9_TFLUSH = 108, + P9_RFLUSH = 109, + P9_TWALK = 110, + P9_RWALK = 111, + P9_TOPEN = 112, + P9_ROPEN = 113, + P9_TCREATE = 114, + P9_RCREATE = 115, + P9_TREAD = 116, + P9_RREAD = 117, + P9_TWRITE = 118, + P9_RWRITE = 119, + P9_TCLUNK = 120, + P9_RCLUNK = 121, + P9_TREMOVE = 122, + P9_RREMOVE = 123, + P9_TSTAT = 124, + P9_RSTAT = 125, + P9_TWSTAT = 126, + P9_RWSTAT = 127, +}; + +enum p9_open_mode_t { + P9_OREAD = 0, + P9_OWRITE = 1, + P9_ORDWR = 2, + P9_OEXEC = 3, + P9_OTRUNC = 16, + P9_OREXEC = 32, + P9_ORCLOSE = 64, + P9_OAPPEND = 128, + P9_OEXCL = 4096, + P9L_MODE_MASK = 8191, + P9L_DIRECT = 8192, + P9L_NOWRITECACHE = 16384, + P9L_LOOSE = 32768, +}; + +enum { + Opt_msize = 0, + Opt_trans = 1, + Opt_legacy = 2, + Opt_version = 3, + Opt_err___5 = 4, +}; + +enum p9_req_status_t { + REQ_STATUS_ALLOC = 0, + REQ_STATUS_UNSENT = 1, + REQ_STATUS_SENT = 2, + REQ_STATUS_RCVD = 3, + REQ_STATUS_FLSHD = 4, + REQ_STATUS_ERROR = 5, +}; + +struct trace_event_raw_9p_client_req { + struct trace_entry ent; + void *clnt; + __u8 type; + __u32 tag; + char __data[0]; +}; + +struct trace_event_raw_9p_client_res { + struct trace_entry ent; + void *clnt; + __u8 type; + __u32 tag; + __u32 err; + char __data[0]; +}; + +struct trace_event_raw_9p_protocol_dump { + struct trace_entry ent; + void *clnt; + __u8 type; + __u16 tag; + u32 __data_loc_line; + char __data[0]; +}; + +struct trace_event_raw_9p_fid_ref { + struct trace_entry ent; + int fid; + int refcount; + __u8 type; + char __data[0]; +}; + +struct trace_event_data_offsets_9p_protocol_dump { + u32 line; +}; + +struct p9_wstat { + u16 size; + u16 type; + u32 dev; + struct p9_qid qid; + u32 mode; + u32 atime; + u32 mtime; + u64 length; + const char *name; + const char *uid; + const char *gid; + const char *muid; + char *extension; + kuid_t n_uid; + kgid_t n_gid; + kuid_t n_muid; +}; + +struct p9_stat_dotl { + u64 st_result_mask; + struct p9_qid qid; + u32 st_mode; + kuid_t st_uid; + kgid_t st_gid; + u64 st_nlink; + u64 st_rdev; + u64 st_size; + u64 
st_blksize; + u64 st_blocks; + u64 st_atime_sec; + u64 st_atime_nsec; + u64 st_mtime_sec; + u64 st_mtime_nsec; + u64 st_ctime_sec; + u64 st_ctime_nsec; + u64 st_btime_sec; + u64 st_btime_nsec; + u64 st_gen; + u64 st_data_version; +}; + +struct trace_event_data_offsets_9p_client_req {}; + +struct trace_event_data_offsets_9p_client_res {}; + +struct trace_event_data_offsets_9p_fid_ref {}; + +struct p9_iattr_dotl { + u32 valid; + u32 mode; + kuid_t uid; + kgid_t gid; + u64 size; + u64 atime_sec; + u64 atime_nsec; + u64 mtime_sec; + u64 mtime_nsec; +}; + +struct p9_rstatfs { + u32 type; + u32 bsize; + u64 blocks; + u64 bfree; + u64 bavail; + u64 files; + u64 ffree; + u64 fsid; + u32 namelen; +}; + +struct p9_flock { + u8 type; + u32 flags; + u64 start; + u64 length; + u32 proc_id; + char *client_id; +}; + +struct p9_getlock { + u8 type; + u64 start; + u64 length; + u32 proc_id; + char *client_id; +}; + +struct errormap { + char *name; + int val; + int namelen; + struct hlist_node list; +}; + +struct p9_dirent { + struct p9_qid qid; + u64 d_off; + unsigned char d_type; + char d_name[256]; +}; + +enum { + Rworksched = 1, + Rpending = 2, + Wworksched = 4, + Wpending = 8, +}; + +enum { + Opt_port = 0, + Opt_rfdno = 1, + Opt_wfdno = 2, + Opt_err___6 = 3, + Opt_privport = 4, +}; + +struct p9_conn; + +struct p9_poll_wait { + struct p9_conn *conn; + wait_queue_entry_t wait; + wait_queue_head_t *wait_addr; +}; + +struct p9_conn { + struct list_head mux_list; + struct p9_client *client; + int err; + spinlock_t req_lock; + struct list_head req_list; + struct list_head unsent_req_list; + struct p9_req_t *rreq; + struct p9_req_t *wreq; + char tmp_buf[7]; + struct p9_fcall rc; + int wpos; + int wsize; + char *wbuf; + struct list_head poll_pending_link; + struct p9_poll_wait poll_wait[2]; + poll_table pt; + struct work_struct rq; + struct work_struct wq; + unsigned long wsched; +}; + +struct p9_trans_fd { + struct file *rd; + struct file *wr; + struct p9_conn conn; +}; + +struct p9_fd_opts { + int rfd; + int wfd; + u16 port; + bool privport; +}; + +typedef int (*lookup_by_table_id_t)(struct net *, u32); + +struct l3mdev_handler { + lookup_by_table_id_t dev_lookup; +}; + +struct xdp_ring; + +struct xsk_queue { + u32 ring_mask; + u32 nentries; + u32 cached_prod; + u32 cached_cons; + struct xdp_ring *ring; + u64 invalid_descs; + u64 queue_empty_descs; + size_t ring_vmalloc_size; +}; + +struct xdp_ring { + u32 producer; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + u32 pad1; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + u32 consumer; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + u32 pad2; + u32 flags; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + u32 pad3; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct xdp_rxtx_ring { + struct xdp_ring ptrs; + struct xdp_desc desc[0]; +}; + +struct xdp_umem_ring { + struct xdp_ring ptrs; + u64 desc[0]; +}; + +struct xsk_map; + +struct xsk_map_node { + struct list_head node; + struct xsk_map *map; + struct xdp_sock __attribute__((btf_type_tag("rcu"))) * *map_entry; +}; + +struct xsk_map { + struct bpf_map map; + spinlock_t lock; + atomic_t count; + struct xdp_sock __attribute__((btf_type_tag("rcu"))) * xsk_map[0]; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; + long : 64; +}; + +struct sockaddr_xdp { + 
__u16 sxdp_family; + __u16 sxdp_flags; + __u32 sxdp_ifindex; + __u32 sxdp_queue_id; + __u32 sxdp_shared_umem_fd; +}; + +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +struct parsed_desc { + u32 mb; + u32 valid; +}; + +struct xdp_umem_reg { + __u64 addr; + __u64 len; + __u32 chunk_size; + __u32 headroom; + __u32 flags; +}; + +struct xdp_statistics { + __u64 rx_dropped; + __u64 rx_invalid_descs; + __u64 tx_invalid_descs; + __u64 rx_ring_full; + __u64 rx_fill_ring_empty_descs; + __u64 tx_ring_empty_descs; +}; + +struct xdp_ring_offset { + __u64 producer; + __u64 consumer; + __u64 desc; + __u64 flags; +}; + +struct xdp_mmap_offsets { + struct xdp_ring_offset rx; + struct xdp_ring_offset tx; + struct xdp_ring_offset fr; + struct xdp_ring_offset cr; +}; + +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + +struct xdp_options { + __u32 flags; +}; + +struct xsk_dma_map { + dma_addr_t *dma_pages; + struct device *dev; + struct net_device *netdev; + refcount_t users; + struct list_head list; + u32 dma_pages_cnt; + bool dma_need_sync; +}; + +enum { + XDP_DIAG_NONE = 0, + XDP_DIAG_INFO = 1, + XDP_DIAG_UID = 2, + XDP_DIAG_RX_RING = 3, + XDP_DIAG_TX_RING = 4, + XDP_DIAG_UMEM = 5, + XDP_DIAG_UMEM_FILL_RING = 6, + XDP_DIAG_UMEM_COMPLETION_RING = 7, + XDP_DIAG_MEMINFO = 8, + XDP_DIAG_STATS = 9, + __XDP_DIAG_MAX = 10, +}; + +struct xdp_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u16 pad; + __u32 xdiag_ino; + __u32 xdiag_show; + __u32 xdiag_cookie[2]; +}; + +struct xdp_diag_msg { + __u8 xdiag_family; + __u8 xdiag_type; + __u16 pad; + __u32 xdiag_ino; + __u32 xdiag_cookie[2]; +}; + +struct xdp_diag_info { + __u32 ifindex; + __u32 queue_id; +}; + +struct xdp_diag_ring { + __u32 entries; +}; + +struct xdp_diag_umem { + __u64 size; + __u32 id; + __u32 num_pages; + __u32 chunk_size; + __u32 headroom; + __u32 ifindex; + __u32 queue_id; + __u32 flags; + __u32 refs; +}; + +struct xdp_diag_stats { + __u64 n_rx_dropped; + __u64 n_rx_invalid; + __u64 n_rx_full; + __u64 n_fill_ring_empty; + __u64 n_tx_invalid; + __u64 n_tx_ring_empty; +}; + +struct mptcp_subflow_context; + +typedef void (*btf_trace_mptcp_subflow_get_send)(void *, struct mptcp_subflow_context *); + +struct mptcp_subflow_context { + struct list_head node; + union { + struct { + unsigned long avg_pacing_rate; + u64 local_key; + u64 remote_key; + u64 idsn; + u64 map_seq; + u32 snd_isn; + u32 token; + u32 rel_write_seq; + u32 map_subflow_seq; + u32 ssn_offset; + u32 map_data_len; + __wsum map_data_csum; + u32 map_csum_len; + u32 request_mptcp : 1; + u32 request_join : 1; + u32 request_bkup : 1; + u32 mp_capable : 1; + u32 mp_join : 1; + u32 fully_established : 1; + u32 pm_notified : 1; + u32 conn_finished : 1; + u32 map_valid : 1; + u32 map_csum_reqd : 1; + u32 map_data_fin : 1; + u32 mpc_map : 1; + u32 backup : 1; + u32 send_mp_prio : 1; + u32 send_mp_fail : 1; + u32 send_fastclose : 1; + u32 send_infinite_map : 1; + u32 remote_key_valid : 1; + u32 disposable : 1; + u32 stale : 1; + u32 local_id_valid : 1; + u32 valid_csum_seen : 1; + u32 is_mptfo : 1; + u32 __unused : 9; + bool data_avail; + bool scheduled; + u32 remote_nonce; + u64 thmac; + u32 local_nonce; + u32 remote_token; + union { + u8 hmac[20]; + u64 iasn; + }; + u8 local_id; + u8 remote_id; + u8 reset_seen : 1; + u8 reset_transient : 1; + u8 reset_reason : 4; + u8 stale_count; + u32 subflow_id; + long delegated_status; + unsigned long 
fail_tout; + }; + struct { + unsigned long avg_pacing_rate; + u64 local_key; + u64 remote_key; + u64 idsn; + u64 map_seq; + u32 snd_isn; + u32 token; + u32 rel_write_seq; + u32 map_subflow_seq; + u32 ssn_offset; + u32 map_data_len; + __wsum map_data_csum; + u32 map_csum_len; + u32 request_mptcp : 1; + u32 request_join : 1; + u32 request_bkup : 1; + u32 mp_capable : 1; + u32 mp_join : 1; + u32 fully_established : 1; + u32 pm_notified : 1; + u32 conn_finished : 1; + u32 map_valid : 1; + u32 map_csum_reqd : 1; + u32 map_data_fin : 1; + u32 mpc_map : 1; + u32 backup : 1; + u32 send_mp_prio : 1; + u32 send_mp_fail : 1; + u32 send_fastclose : 1; + u32 send_infinite_map : 1; + u32 remote_key_valid : 1; + u32 disposable : 1; + u32 stale : 1; + u32 local_id_valid : 1; + u32 valid_csum_seen : 1; + u32 is_mptfo : 1; + u32 __unused : 9; + bool data_avail; + bool scheduled; + u32 remote_nonce; + u64 thmac; + u32 local_nonce; + u32 remote_token; + union { + u8 hmac[20]; + u64 iasn; + }; + u8 local_id; + u8 remote_id; + u8 reset_seen : 1; + u8 reset_transient : 1; + u8 reset_reason : 4; + u8 stale_count; + u32 subflow_id; + long delegated_status; + unsigned long fail_tout; + } reset; + }; + struct list_head delegated_node; + u32 setsockopt_seq; + u32 stale_rcv_tstamp; + int cached_sndbuf; + struct sock *tcp_sock; + struct sock *conn; + const struct inet_connection_sock_af_ops *icsk_af_ops; + void (*tcp_state_change)(struct sock *); + void (*tcp_error_report)(struct sock *); + struct callback_head rcu; +}; + +typedef void (*btf_trace_mptcp_sendmsg_frag)(void *, struct mptcp_ext *); + +typedef void (*btf_trace_get_mapping_status)(void *, struct mptcp_ext *); + +typedef void (*btf_trace_ack_update_msk)(void *, u64, u64, u64, u64, u64); + +typedef void (*btf_trace_subflow_check_data_avail)(void *, __u8, struct sk_buff *); + +struct mptcp_delegated_action { + struct napi_struct napi; + struct list_head head; +}; + +enum linux_mptcp_mib_field { + MPTCP_MIB_NUM = 0, + MPTCP_MIB_MPCAPABLEPASSIVE = 1, + MPTCP_MIB_MPCAPABLEACTIVE = 2, + MPTCP_MIB_MPCAPABLEACTIVEACK = 3, + MPTCP_MIB_MPCAPABLEPASSIVEACK = 4, + MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK = 5, + MPTCP_MIB_MPCAPABLEACTIVEFALLBACK = 6, + MPTCP_MIB_TOKENFALLBACKINIT = 7, + MPTCP_MIB_RETRANSSEGS = 8, + MPTCP_MIB_JOINNOTOKEN = 9, + MPTCP_MIB_JOINSYNRX = 10, + MPTCP_MIB_JOINSYNACKRX = 11, + MPTCP_MIB_JOINSYNACKMAC = 12, + MPTCP_MIB_JOINACKRX = 13, + MPTCP_MIB_JOINACKMAC = 14, + MPTCP_MIB_DSSNOMATCH = 15, + MPTCP_MIB_INFINITEMAPTX = 16, + MPTCP_MIB_INFINITEMAPRX = 17, + MPTCP_MIB_DSSTCPMISMATCH = 18, + MPTCP_MIB_DATACSUMERR = 19, + MPTCP_MIB_OFOQUEUETAIL = 20, + MPTCP_MIB_OFOQUEUE = 21, + MPTCP_MIB_OFOMERGE = 22, + MPTCP_MIB_NODSSWINDOW = 23, + MPTCP_MIB_DUPDATA = 24, + MPTCP_MIB_ADDADDR = 25, + MPTCP_MIB_ADDADDRTX = 26, + MPTCP_MIB_ADDADDRTXDROP = 27, + MPTCP_MIB_ECHOADD = 28, + MPTCP_MIB_ECHOADDTX = 29, + MPTCP_MIB_ECHOADDTXDROP = 30, + MPTCP_MIB_PORTADD = 31, + MPTCP_MIB_ADDADDRDROP = 32, + MPTCP_MIB_JOINPORTSYNRX = 33, + MPTCP_MIB_JOINPORTSYNACKRX = 34, + MPTCP_MIB_JOINPORTACKRX = 35, + MPTCP_MIB_MISMATCHPORTSYNRX = 36, + MPTCP_MIB_MISMATCHPORTACKRX = 37, + MPTCP_MIB_RMADDR = 38, + MPTCP_MIB_RMADDRDROP = 39, + MPTCP_MIB_RMADDRTX = 40, + MPTCP_MIB_RMADDRTXDROP = 41, + MPTCP_MIB_RMSUBFLOW = 42, + MPTCP_MIB_MPPRIOTX = 43, + MPTCP_MIB_MPPRIORX = 44, + MPTCP_MIB_MPFAILTX = 45, + MPTCP_MIB_MPFAILRX = 46, + MPTCP_MIB_MPFASTCLOSETX = 47, + MPTCP_MIB_MPFASTCLOSERX = 48, + MPTCP_MIB_MPRSTTX = 49, + MPTCP_MIB_MPRSTRX = 50, + MPTCP_MIB_RCVPRUNED = 51, + 
MPTCP_MIB_SUBFLOWSTALE = 52, + MPTCP_MIB_SUBFLOWRECOVER = 53, + MPTCP_MIB_SNDWNDSHARED = 54, + MPTCP_MIB_RCVWNDSHARED = 55, + MPTCP_MIB_RCVWNDCONFLICTUPDATE = 56, + MPTCP_MIB_RCVWNDCONFLICT = 57, + __MPTCP_MIB_MAX = 58, +}; + +enum mptcp_event_type { + MPTCP_EVENT_UNSPEC = 0, + MPTCP_EVENT_CREATED = 1, + MPTCP_EVENT_ESTABLISHED = 2, + MPTCP_EVENT_CLOSED = 3, + MPTCP_EVENT_ANNOUNCED = 6, + MPTCP_EVENT_REMOVED = 7, + MPTCP_EVENT_SUB_ESTABLISHED = 10, + MPTCP_EVENT_SUB_CLOSED = 11, + MPTCP_EVENT_SUB_PRIORITY = 13, + MPTCP_EVENT_LISTENER_CREATED = 15, + MPTCP_EVENT_LISTENER_CLOSED = 16, +}; + +enum { + MPTCP_CMSG_TS = 1, + MPTCP_CMSG_INQ = 2, +}; + +struct mptcp_pm_data { + struct mptcp_addr_info local; + struct mptcp_addr_info remote; + struct list_head anno_list; + struct list_head userspace_pm_local_addr_list; + spinlock_t lock; + u8 addr_signal; + bool server_side; + bool work_pending; + bool accept_addr; + bool accept_subflow; + bool remote_deny_join_id0; + u8 add_addr_signaled; + u8 add_addr_accepted; + u8 local_addr_used; + u8 pm_type; + u8 subflows; + u8 status; + unsigned long id_avail_bitmap[4]; + struct mptcp_rm_list rm_list_tx; + struct mptcp_rm_list rm_list_rx; +}; + +struct mptcp_data_frag; + +struct mptcp_sched_ops; + +struct mptcp_sock { + struct inet_connection_sock sk; + u64 local_key; + u64 remote_key; + u64 write_seq; + u64 bytes_sent; + u64 snd_nxt; + u64 bytes_received; + u64 ack_seq; + atomic64_t rcv_wnd_sent; + u64 rcv_data_fin_seq; + u64 bytes_retrans; + u64 bytes_consumed; + int rmem_fwd_alloc; + int snd_burst; + int old_wspace; + u64 recovery_snd_nxt; + u64 bytes_acked; + u64 snd_una; + u64 wnd_end; + unsigned long timer_ival; + u32 token; + int rmem_released; + unsigned long flags; + unsigned long cb_flags; + unsigned long push_pending; + bool recovery; + bool can_ack; + bool fully_established; + bool rcv_data_fin; + bool snd_data_fin_enable; + bool rcv_fastclose; + bool use_64bit_ack; + bool csum_enabled; + bool allow_infinite_fallback; + u8 pending_state; + u8 mpc_endpoint_id; + u8 recvmsg_inq : 1; + u8 cork : 1; + u8 nodelay : 1; + u8 fastopening : 1; + u8 in_accept_queue : 1; + u8 free_first : 1; + struct work_struct work; + struct sk_buff *ooo_last_skb; + struct rb_root out_of_order_queue; + struct sk_buff_head receive_queue; + struct list_head conn_list; + struct list_head rtx_queue; + struct mptcp_data_frag *first_pending; + struct list_head join_list; + struct sock *first; + struct mptcp_pm_data pm; + struct mptcp_sched_ops *sched; + struct { + u32 space; + u32 copied; + u64 time; + u64 rtt_us; + } rcvq_space; + u8 scaling_ratio; + u32 subflow_id; + u32 setsockopt_seq; + char ca_name[16]; +}; + +struct mptcp_data_frag { + struct list_head list; + u64 data_seq; + u16 data_len; + u16 offset; + u16 overhead; + u16 already_sent; + struct page *page; +}; + +struct mptcp_sched_data; + +struct mptcp_sched_ops { + int (*get_subflow)(struct mptcp_sock *, struct mptcp_sched_data *); + char name[16]; + struct module *owner; + struct list_head list; + void (*init)(struct mptcp_sock *); + void (*release)(struct mptcp_sock *); +}; + +struct mptcp_sched_data { + bool reinject; + u8 subflows; + struct mptcp_subflow_context *contexts[8]; +}; + +struct trace_event_raw_mptcp_subflow_get_send { + struct trace_entry ent; + bool active; + bool free; + u32 snd_wnd; + u32 pace; + u8 backup; + u64 ratio; + char __data[0]; +}; + +struct trace_event_raw_mptcp_dump_mpext { + struct trace_entry ent; + u64 data_ack; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + u16 csum; + u8 
use_map; + u8 dsn64; + u8 data_fin; + u8 use_ack; + u8 ack64; + u8 mpc_map; + u8 frozen; + u8 reset_transient; + u8 reset_reason; + u8 csum_reqd; + u8 infinite_map; + char __data[0]; +}; + +struct trace_event_raw_ack_update_msk { + struct trace_entry ent; + u64 data_ack; + u64 old_snd_una; + u64 new_snd_una; + u64 new_wnd_end; + u64 msk_wnd_end; + char __data[0]; +}; + +struct trace_event_raw_subflow_check_data_avail { + struct trace_entry ent; + u8 status; + const void *skb; + char __data[0]; +}; + +struct mptcp_skb_cb { + u64 map_seq; + u64 end_seq; + u32 offset; + u8 has_rxtstamp : 1; +}; + +struct mptcp_subflow_request_sock { + struct tcp_request_sock sk; + u16 mp_capable : 1; + u16 mp_join : 1; + u16 backup : 1; + u16 csum_reqd : 1; + u16 allow_join_id0 : 1; + u8 local_id; + u8 remote_id; + u64 local_key; + u64 idsn; + u32 token; + u32 ssn_offset; + u64 thmac; + u32 local_nonce; + u32 remote_nonce; + struct mptcp_sock *msk; + struct hlist_nulls_node token_node; +}; + +struct mptcp_sendmsg_info { + int mss_now; + int size_goal; + u16 limit; + u16 sent; + unsigned int flags; + bool data_lock_held; +}; + +struct trace_event_data_offsets_mptcp_subflow_get_send {}; + +struct trace_event_data_offsets_mptcp_dump_mpext {}; + +struct trace_event_data_offsets_ack_update_msk {}; + +struct trace_event_data_offsets_subflow_check_data_avail {}; + +struct subflow_send_info { + struct sock *ssk; + u64 linger_time; +}; + +struct mptcp_options_received { + u64 sndr_key; + u64 rcvr_key; + u64 data_ack; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + __sum16 csum; + u16 suboptions; + u32 token; + u32 nonce; + u16 use_map : 1; + u16 dsn64 : 1; + u16 data_fin : 1; + u16 use_ack : 1; + u16 ack64 : 1; + u16 mpc_map : 1; + u16 reset_reason : 4; + u16 reset_transient : 1; + u16 echo : 1; + u16 backup : 1; + u16 deny_join_id0 : 1; + u16 __unused : 2; + u8 join_id; + u64 thmac; + u8 hmac[20]; + struct mptcp_addr_info addr; + struct mptcp_rm_list rm_list; + u64 ahmac; + u64 fail_seq; +}; + +enum mapping_status { + MAPPING_OK = 0, + MAPPING_INVALID = 1, + MAPPING_EMPTY = 2, + MAPPING_DATA_FIN = 3, + MAPPING_DUMMY = 4, + MAPPING_BAD_CSUM = 5, +}; + +enum mptcp_pm_type { + MPTCP_PM_TYPE_KERNEL = 0, + MPTCP_PM_TYPE_USERSPACE = 1, + __MPTCP_PM_TYPE_NR = 2, + __MPTCP_PM_TYPE_MAX = 1, +}; + +enum mptcp_addr_signal_status { + MPTCP_ADD_ADDR_SIGNAL = 0, + MPTCP_ADD_ADDR_ECHO = 1, + MPTCP_RM_ADDR_SIGNAL = 2, +}; + +struct csum_pseudo_header { + __be64 data_seq; + __be32 subflow_seq; + __be16 data_len; + __sum16 csum; +}; + +struct token_bucket { + spinlock_t lock; + int chain_len; + struct hlist_nulls_head req_chain; + struct hlist_nulls_head msk_chain; +}; + +struct mptcp_pernet { + struct ctl_table_header *ctl_table_hdr; + unsigned int add_addr_timeout; + unsigned int close_timeout; + unsigned int stale_loss_cnt; + u8 mptcp_enabled; + u8 checksum_enabled; + u8 allow_join_initial_addr_port; + u8 pm_type; + char scheduler[16]; +}; + +enum mptcp_pm_status { + MPTCP_PM_ADD_ADDR_RECEIVED = 0, + MPTCP_PM_ADD_ADDR_SEND_ACK = 1, + MPTCP_PM_RM_ADDR_RECEIVED = 2, + MPTCP_PM_ESTABLISHED = 3, + MPTCP_PM_SUBFLOW_ESTABLISHED = 4, + MPTCP_PM_ALREADY_ESTABLISHED = 5, + MPTCP_PM_MPC_ENDPOINT_ACCOUNTED = 6, +}; + +struct mptcp_pm_addr_entry { + struct list_head list; + struct mptcp_addr_info addr; + u8 flags; + int ifindex; + struct socket *lsk; +}; + +enum { + INET_ULP_INFO_UNSPEC = 0, + INET_ULP_INFO_NAME = 1, + INET_ULP_INFO_TLS = 2, + INET_ULP_INFO_MPTCP = 3, + __INET_ULP_INFO_MAX = 4, +}; + +enum { + MPTCP_SUBFLOW_ATTR_UNSPEC = 
0, + MPTCP_SUBFLOW_ATTR_TOKEN_REM = 1, + MPTCP_SUBFLOW_ATTR_TOKEN_LOC = 2, + MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ = 3, + MPTCP_SUBFLOW_ATTR_MAP_SEQ = 4, + MPTCP_SUBFLOW_ATTR_MAP_SFSEQ = 5, + MPTCP_SUBFLOW_ATTR_SSN_OFFSET = 6, + MPTCP_SUBFLOW_ATTR_MAP_DATALEN = 7, + MPTCP_SUBFLOW_ATTR_FLAGS = 8, + MPTCP_SUBFLOW_ATTR_ID_REM = 9, + MPTCP_SUBFLOW_ATTR_ID_LOC = 10, + MPTCP_SUBFLOW_ATTR_PAD = 11, + __MPTCP_SUBFLOW_ATTR_MAX = 12, +}; + +enum { + MPTCP_PM_ADDR_ATTR_UNSPEC = 0, + MPTCP_PM_ADDR_ATTR_FAMILY = 1, + MPTCP_PM_ADDR_ATTR_ID = 2, + MPTCP_PM_ADDR_ATTR_ADDR4 = 3, + MPTCP_PM_ADDR_ATTR_ADDR6 = 4, + MPTCP_PM_ADDR_ATTR_PORT = 5, + MPTCP_PM_ADDR_ATTR_FLAGS = 6, + MPTCP_PM_ADDR_ATTR_IF_IDX = 7, + __MPTCP_PM_ADDR_ATTR_MAX = 8, +}; + +enum { + MPTCP_PM_ENDPOINT_ADDR = 1, + __MPTCP_PM_ENDPOINT_MAX = 2, +}; + +enum { + MPTCP_PM_CMD_UNSPEC = 0, + MPTCP_PM_CMD_ADD_ADDR = 1, + MPTCP_PM_CMD_DEL_ADDR = 2, + MPTCP_PM_CMD_GET_ADDR = 3, + MPTCP_PM_CMD_FLUSH_ADDRS = 4, + MPTCP_PM_CMD_SET_LIMITS = 5, + MPTCP_PM_CMD_GET_LIMITS = 6, + MPTCP_PM_CMD_SET_FLAGS = 7, + MPTCP_PM_CMD_ANNOUNCE = 8, + MPTCP_PM_CMD_REMOVE = 9, + MPTCP_PM_CMD_SUBFLOW_CREATE = 10, + MPTCP_PM_CMD_SUBFLOW_DESTROY = 11, + __MPTCP_PM_CMD_AFTER_LAST = 12, +}; + +enum { + MPTCP_PM_ATTR_UNSPEC = 0, + MPTCP_PM_ATTR_ADDR = 1, + MPTCP_PM_ATTR_RCV_ADD_ADDRS = 2, + MPTCP_PM_ATTR_SUBFLOWS = 3, + MPTCP_PM_ATTR_TOKEN = 4, + MPTCP_PM_ATTR_LOC_ID = 5, + MPTCP_PM_ATTR_ADDR_REMOTE = 6, + __MPTCP_ATTR_AFTER_LAST = 7, +}; + +enum mptcp_event_attr { + MPTCP_ATTR_UNSPEC = 0, + MPTCP_ATTR_TOKEN = 1, + MPTCP_ATTR_FAMILY = 2, + MPTCP_ATTR_LOC_ID = 3, + MPTCP_ATTR_REM_ID = 4, + MPTCP_ATTR_SADDR4 = 5, + MPTCP_ATTR_SADDR6 = 6, + MPTCP_ATTR_DADDR4 = 7, + MPTCP_ATTR_DADDR6 = 8, + MPTCP_ATTR_SPORT = 9, + MPTCP_ATTR_DPORT = 10, + MPTCP_ATTR_BACKUP = 11, + MPTCP_ATTR_ERROR = 12, + MPTCP_ATTR_FLAGS = 13, + MPTCP_ATTR_TIMEOUT = 14, + MPTCP_ATTR_IF_IDX = 15, + MPTCP_ATTR_RESET_REASON = 16, + MPTCP_ATTR_RESET_FLAGS = 17, + MPTCP_ATTR_SERVER_SIDE = 18, + __MPTCP_ATTR_MAX = 19, +}; + +struct mptcp_pm_add_entry { + struct list_head list; + struct mptcp_addr_info addr; + u8 retrans_times; + struct timer_list add_timer; + struct mptcp_sock *sock; +}; + +struct pm_nl_pernet { + spinlock_t lock; + struct list_head local_addr_list; + unsigned int addrs; + unsigned int stale_loss_cnt; + unsigned int add_addr_signal_max; + unsigned int add_addr_accept_max; + unsigned int local_addr_max; + unsigned int subflows_max; + unsigned int next_id; + unsigned long id_bitmap[4]; +}; + +struct mptcp_subflow_data { + __u32 size_subflow_data; + __u32 num_subflows; + __u32 size_kernel; + __u32 size_user; +}; + +struct mptcp_info { + __u8 mptcpi_subflows; + __u8 mptcpi_add_addr_signal; + __u8 mptcpi_add_addr_accepted; + __u8 mptcpi_subflows_max; + __u8 mptcpi_add_addr_signal_max; + __u8 mptcpi_add_addr_accepted_max; + __u32 mptcpi_flags; + __u32 mptcpi_token; + __u64 mptcpi_write_seq; + __u64 mptcpi_snd_una; + __u64 mptcpi_rcv_nxt; + __u8 mptcpi_local_addr_used; + __u8 mptcpi_local_addr_max; + __u8 mptcpi_csum_enabled; + __u32 mptcpi_retransmits; + __u64 mptcpi_bytes_retrans; + __u64 mptcpi_bytes_sent; + __u64 mptcpi_bytes_received; + __u64 mptcpi_bytes_acked; +}; + +struct mptcp_full_info { + __u32 size_tcpinfo_kernel; + __u32 size_tcpinfo_user; + __u32 size_sfinfo_kernel; + __u32 size_sfinfo_user; + __u32 num_subflows; + __u32 size_arrays_user; + __u64 subflow_info; + __u64 tcp_info; + struct mptcp_info mptcp_info; +}; + +struct mptcp_subflow_addrs { + union { + __kernel_sa_family_t sa_family; + struct 
sockaddr sa_local; + struct sockaddr_in sin_local; + struct sockaddr_in6 sin6_local; + struct __kernel_sockaddr_storage ss_local; + }; + union { + struct sockaddr sa_remote; + struct sockaddr_in sin_remote; + struct sockaddr_in6 sin6_remote; + struct __kernel_sockaddr_storage ss_remote; + }; +}; + +struct mptcp_subflow_info { + __u32 id; + struct mptcp_subflow_addrs addrs; +}; + +struct join_entry { + u32 token; + u32 remote_nonce; + u32 local_nonce; + u8 join_id; + u8 local_id; + u8 backup; + u8 valid; +}; + +enum { + TLS_RECORD_TYPE_CHANGE_CIPHER_SPEC = 20, + TLS_RECORD_TYPE_ALERT = 21, + TLS_RECORD_TYPE_HANDSHAKE = 22, + TLS_RECORD_TYPE_DATA = 23, + TLS_RECORD_TYPE_HEARTBEAT = 24, + TLS_RECORD_TYPE_TLS12_CID = 25, + TLS_RECORD_TYPE_ACK = 26, +}; + +enum hp_flags_bits { + HANDSHAKE_F_PROTO_NOTIFY = 0, +}; + +enum { + HANDSHAKE_CMD_READY = 1, + HANDSHAKE_CMD_ACCEPT = 2, + HANDSHAKE_CMD_DONE = 3, + __HANDSHAKE_CMD_MAX = 4, + HANDSHAKE_CMD_MAX = 3, +}; + +enum { + HANDSHAKE_A_ACCEPT_SOCKFD = 1, + HANDSHAKE_A_ACCEPT_HANDLER_CLASS = 2, + HANDSHAKE_A_ACCEPT_MESSAGE_TYPE = 3, + HANDSHAKE_A_ACCEPT_TIMEOUT = 4, + HANDSHAKE_A_ACCEPT_AUTH_MODE = 5, + HANDSHAKE_A_ACCEPT_PEER_IDENTITY = 6, + HANDSHAKE_A_ACCEPT_CERTIFICATE = 7, + HANDSHAKE_A_ACCEPT_PEERNAME = 8, + __HANDSHAKE_A_ACCEPT_MAX = 9, + HANDSHAKE_A_ACCEPT_MAX = 8, +}; + +enum { + HANDSHAKE_A_DONE_STATUS = 1, + HANDSHAKE_A_DONE_SOCKFD = 2, + HANDSHAKE_A_DONE_REMOTE_AUTH = 3, + __HANDSHAKE_A_DONE_MAX = 4, + HANDSHAKE_A_DONE_MAX = 3, +}; + +enum hn_flags_bits { + HANDSHAKE_F_NET_DRAINING = 0, +}; + +struct handshake_proto; + +struct handshake_req { + struct list_head hr_list; + struct rhash_head hr_rhash; + unsigned long hr_flags; + const struct handshake_proto *hr_proto; + struct sock *hr_sk; + void (*hr_odestruct)(struct sock *); + char hr_priv[0]; +}; + +struct handshake_proto { + int hp_handler_class; + size_t hp_privsize; + unsigned long hp_flags; + int (*hp_accept)(struct handshake_req *, struct genl_info *, int); + void (*hp_done)(struct handshake_req *, unsigned int, struct genl_info *); + void (*hp_destroy)(struct handshake_req *); +}; + +struct handshake_net { + spinlock_t hn_lock; + int hn_pending; + int hn_pending_max; + struct list_head hn_requests; + unsigned long hn_flags; +}; + +enum handshake_handler_class { + HANDSHAKE_HANDLER_CLASS_NONE = 0, + HANDSHAKE_HANDLER_CLASS_TLSHD = 1, + HANDSHAKE_HANDLER_CLASS_MAX = 2, +}; + +enum hr_flags_bits { + HANDSHAKE_F_REQ_COMPLETED = 0, + HANDSHAKE_F_REQ_SESSION = 1, +}; + +enum handshake_msg_type { + HANDSHAKE_MSG_TYPE_UNSPEC = 0, + HANDSHAKE_MSG_TYPE_CLIENTHELLO = 1, + HANDSHAKE_MSG_TYPE_SERVERHELLO = 2, +}; + +enum handshake_auth { + HANDSHAKE_AUTH_UNSPEC = 0, + HANDSHAKE_AUTH_UNAUTH = 1, + HANDSHAKE_AUTH_PSK = 2, + HANDSHAKE_AUTH_X509 = 3, +}; + +enum { + TLS_ALERT_LEVEL_WARNING = 1, + TLS_ALERT_LEVEL_FATAL = 2, +}; + +enum { + TLS_ALERT_DESC_CLOSE_NOTIFY = 0, + TLS_ALERT_DESC_UNEXPECTED_MESSAGE = 10, + TLS_ALERT_DESC_BAD_RECORD_MAC = 20, + TLS_ALERT_DESC_RECORD_OVERFLOW = 22, + TLS_ALERT_DESC_HANDSHAKE_FAILURE = 40, + TLS_ALERT_DESC_BAD_CERTIFICATE = 42, + TLS_ALERT_DESC_UNSUPPORTED_CERTIFICATE = 43, + TLS_ALERT_DESC_CERTIFICATE_REVOKED = 44, + TLS_ALERT_DESC_CERTIFICATE_EXPIRED = 45, + TLS_ALERT_DESC_CERTIFICATE_UNKNOWN = 46, + TLS_ALERT_DESC_ILLEGAL_PARAMETER = 47, + TLS_ALERT_DESC_UNKNOWN_CA = 48, + TLS_ALERT_DESC_ACCESS_DENIED = 49, + TLS_ALERT_DESC_DECODE_ERROR = 50, + TLS_ALERT_DESC_DECRYPT_ERROR = 51, + TLS_ALERT_DESC_TOO_MANY_CIDS_REQUESTED = 52, + 
TLS_ALERT_DESC_PROTOCOL_VERSION = 70, + TLS_ALERT_DESC_INSUFFICIENT_SECURITY = 71, + TLS_ALERT_DESC_INTERNAL_ERROR = 80, + TLS_ALERT_DESC_INAPPROPRIATE_FALLBACK = 86, + TLS_ALERT_DESC_USER_CANCELED = 90, + TLS_ALERT_DESC_MISSING_EXTENSION = 109, + TLS_ALERT_DESC_UNSUPPORTED_EXTENSION = 110, + TLS_ALERT_DESC_UNRECOGNIZED_NAME = 112, + TLS_ALERT_DESC_BAD_CERTIFICATE_STATUS_RESPONSE = 113, + TLS_ALERT_DESC_UNKNOWN_PSK_IDENTITY = 115, + TLS_ALERT_DESC_CERTIFICATE_REQUIRED = 116, + TLS_ALERT_DESC_NO_APPLICATION_PROTOCOL = 120, +}; + +enum { + TLS_NO_KEYRING = 0, + TLS_NO_PEERID = 0, + TLS_NO_CERT = 0, + TLS_NO_PRIVKEY = 0, +}; + +enum { + HANDSHAKE_A_X509_CERT = 1, + HANDSHAKE_A_X509_PRIVKEY = 2, + __HANDSHAKE_A_X509_MAX = 3, + HANDSHAKE_A_X509_MAX = 2, +}; + +struct tls_handshake_req { + void (*th_consumer_done)(void *, int, key_serial_t); + void *th_consumer_data; + int th_type; + unsigned int th_timeout_ms; + int th_auth_mode; + const char *th_peername; + key_serial_t th_keyring; + key_serial_t th_certificate; + key_serial_t th_privkey; + unsigned int th_num_peerids; + key_serial_t th_peerid[5]; +}; + +typedef void (*tls_done_func_t)(void *, int, key_serial_t); + +struct tls_handshake_args { + struct socket *ta_sock; + tls_done_func_t ta_done; + void *ta_data; + const char *ta_peername; + unsigned int ta_timeout_ms; + key_serial_t ta_keyring; + key_serial_t ta_my_cert; + key_serial_t ta_my_privkey; + unsigned int ta_num_peerids; + key_serial_t ta_my_peerids[5]; +}; + +typedef void (*btf_trace_handshake_submit)(void *, const struct net *, + const struct handshake_req *, const struct sock *); + +typedef void (*btf_trace_handshake_submit_err)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_handshake_cancel)(void *, const struct net *, + const struct handshake_req *, const struct sock *); + +typedef void (*btf_trace_handshake_cancel_none)(void *, const struct net *, + const struct handshake_req *, + const struct sock *); + +typedef void (*btf_trace_handshake_cancel_busy)(void *, const struct net *, + const struct handshake_req *, + const struct sock *); + +typedef void (*btf_trace_handshake_destruct)(void *, const struct net *, + const struct handshake_req *, + const struct sock *); + +typedef void (*btf_trace_handshake_complete)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_handshake_notify_err)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_handshake_cmd_accept)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_handshake_cmd_accept_err)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_handshake_cmd_done)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_handshake_cmd_done_err)(void *, const struct net *, + const struct handshake_req *, + const struct sock *, int); + +typedef void (*btf_trace_tls_contenttype)(void *, const struct sock *, unsigned char); + +typedef void (*btf_trace_tls_alert_send)(void *, const struct sock *, unsigned char, + unsigned char); + +typedef void (*btf_trace_tls_alert_recv)(void *, const struct sock *, unsigned char, + unsigned char); + +struct trace_event_raw_handshake_event_class { + struct trace_entry ent; + const void *req; + const void *sk; + unsigned int 
netns_ino; + char __data[0]; +}; + +struct trace_event_raw_handshake_error_class { + struct trace_entry ent; + const void *req; + const void *sk; + int err; + unsigned int netns_ino; + char __data[0]; +}; + +struct trace_event_raw_handshake_complete { + struct trace_entry ent; + const void *req; + const void *sk; + int status; + unsigned int netns_ino; + char __data[0]; +}; + +struct trace_event_raw_handshake_fd_class { + struct trace_entry ent; + const void *req; + const void *sk; + int fd; + unsigned int netns_ino; + char __data[0]; +}; + +struct trace_event_raw_tls_contenttype { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + unsigned int netns_ino; + unsigned long type; + char __data[0]; +}; + +struct trace_event_raw_handshake_alert_class { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + unsigned int netns_ino; + unsigned long level; + unsigned long description; + char __data[0]; +}; + +struct trace_event_data_offsets_handshake_event_class {}; + +struct trace_event_data_offsets_handshake_fd_class {}; + +struct trace_event_data_offsets_handshake_error_class {}; + +struct trace_event_data_offsets_handshake_alert_class {}; + +struct trace_event_data_offsets_handshake_complete {}; + +struct trace_event_data_offsets_tls_contenttype {}; + +struct pcibios_fwaddrmap { + struct list_head list; + struct pci_dev *dev; + resource_size_t fw_addr[17]; +}; + +struct pci_check_idx_range { + int start; + int end; +}; + +struct pci_raw_ops { + int (*read)(unsigned int, unsigned int, unsigned int, int, int, u32 *); + int (*write)(unsigned int, unsigned int, unsigned int, int, int, u32); +}; + +struct pci_mmcfg_region { + struct list_head list; + struct resource res; + u64 address; + char *virt; + u16 segment; + u8 start_bus; + u8 end_bus; + char name[30]; +}; + +struct pci_mmcfg_hostbridge_probe { + u32 bus; + u32 devfn; + u32 vendor; + u32 device; + const char *(*probe)(); +}; + +struct acpi_table_mcfg { + struct acpi_table_header header; + u8 reserved[8]; +}; + +struct acpi_mcfg_allocation { + u64 address; + u16 pci_segment; + u8 start_bus_number; + u8 end_bus_number; + u32 reserved; +}; + +typedef bool (*check_reserved_t)(u64, u64, enum e820_type); + +struct pci_root_info { + struct acpi_pci_root_info common; + struct pci_sysdata sd; + bool mcfg_added; + u8 start_bus; + u8 end_bus; +}; + +struct irq_info___2 { + u8 bus; + u8 devfn; + struct { + u8 link; + u16 bitmap; + } __attribute__((packed)) irq[4]; + u8 slot; + u8 rfu; +}; + +struct irq_routing_table { + u32 signature; + u16 version; + u16 size; + u8 rtr_bus; + u8 rtr_devfn; + u16 exclusive_irqs; + u16 rtr_vendor; + u16 rtr_device; + u32 miniport_data; + u8 rfu[11]; + u8 checksum; + struct irq_info___2 slots[0]; +}; + +struct irq_router { + char *name; + u16 vendor; + u16 device; + int (*get)(struct pci_dev *, struct pci_dev *, int); + int (*set)(struct pci_dev *, struct pci_dev *, int, int); + int (*lvl)(struct pci_dev *, struct pci_dev *, int, int); +}; + +struct irq_router_handler { + u16 vendor; + int (*probe)(struct irq_router *, struct pci_dev *, u16); +}; + +struct irt_routing_table { + u32 signature; + u8 size; + u8 used; + u16 exclusive_irqs; + struct irq_info___2 slots[0]; +}; + +enum pci_bf_sort_state { + pci_bf_sort_default = 0, + pci_force_nobf = 1, + pci_force_bf = 2, + pci_dmi_bf = 3, +}; + +struct pci_setup_rom { + struct setup_data data; + uint16_t vendor; + uint16_t devid; + uint64_t pcilen; + unsigned long segment; + unsigned long bus; + unsigned long device; + unsigned long function; + uint8_t 
romdata[0]; +}; + +struct pci_root_res { + struct list_head list; + struct resource res; +}; + +struct pci_root_info___2 { + struct list_head list; + char name[12]; + struct list_head resources; + struct resource busn; + int node; + int link; +}; + +struct amd_hostbridge { + u32 bus; + u32 slot; + u32 device; +}; + +struct restore_data_record { + unsigned long jump_address; + unsigned long jump_address_phys; + unsigned long cr3; + unsigned long magic; + unsigned long e820_checksum; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +typedef struct elf32_phdr Elf32_Phdr; + +typedef struct elf32_note Elf32_Nhdr; + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +struct fprop_local_single { + unsigned long events; + unsigned int period; + raw_spinlock_t lock; +}; + +struct ida_bitmap { + unsigned long bitmap[16]; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + struct task_struct *process; + int woken; +}; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + unsigned long flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, unsigned long, size_t); + void (*out)(void *, unsigned long, u32, size_t); + u32 (*ins)(void *, unsigned long, void *, size_t, unsigned int); + void (*outs)(void *, unsigned long, const void *, size_t, unsigned int); +}; + +typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, unsigned long, + void *); + +enum maple_type { + maple_dense = 0, + maple_leaf_64 = 1, + maple_range_64 = 2, + maple_arange_64 = 3, +}; + +struct trace_event_raw_ma_op { + struct trace_entry ent; + const char *fn; + unsigned long min; + unsigned long max; + unsigned long index; + unsigned long last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_read { + struct trace_entry ent; + const char *fn; + unsigned long min; + unsigned long max; + unsigned long index; + unsigned long last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_write { + struct trace_entry ent; + const char *fn; + unsigned long min; + unsigned long max; + unsigned long index; + unsigned long last; + unsigned long piv; + void *val; + void *node; + char __data[0]; +}; + +struct maple_pnode; + +struct maple_metadata { + unsigned char end; + unsigned char gap; +}; + +struct maple_range_64 { + struct maple_pnode *parent; + unsigned long pivot[15]; + union { + void __attribute__((btf_type_tag("rcu"))) * slot[16]; + struct { + void __attribute__((btf_type_tag("rcu"))) * pad[15]; + struct maple_metadata meta; + }; + }; +}; + +struct maple_arange_64 { + struct maple_pnode *parent; + unsigned long pivot[9]; + void __attribute__((btf_type_tag("rcu"))) * slot[10]; + unsigned long gap[10]; + struct maple_metadata meta; +}; + +struct maple_node { + union { + struct { + struct maple_pnode *parent; + void 
__attribute__((btf_type_tag("rcu"))) * slot[31]; + }; + struct { + void *pad; + struct callback_head rcu; + struct maple_enode *piv_parent; + unsigned char parent_slot; + enum maple_type type; + unsigned char slot_len; + unsigned int ma_flags; + }; + struct maple_range_64 mr64; + struct maple_arange_64 ma64; + struct maple_alloc alloc; + }; +}; + +struct maple_topiary { + struct maple_pnode *parent; + struct maple_enode *next; +}; + +struct ma_wr_state { + struct ma_state *mas; + struct maple_node *node; + unsigned long r_min; + unsigned long r_max; + enum maple_type type; + unsigned char offset_end; + unsigned char node_end; + unsigned long *pivots; + unsigned long end_piv; + void __attribute__((btf_type_tag("rcu"))) * *slots; + void *entry; + void *content; +}; + +struct maple_big_node { + struct maple_pnode *parent; + unsigned long pivot[33]; + union { + struct maple_enode *slot[34]; + struct { + unsigned long padding[21]; + unsigned long gap[21]; + }; + }; + unsigned char b_end; + enum maple_type type; +}; + +struct ma_topiary; + +struct maple_subtree_state { + struct ma_state *orig_l; + struct ma_state *orig_r; + struct ma_state *l; + struct ma_state *m; + struct ma_state *r; + struct ma_topiary *free; + struct ma_topiary *destroy; + struct maple_big_node *bn; +}; + +struct ma_topiary { + struct maple_enode *head; + struct maple_enode *tail; + struct maple_tree *mtree; +}; + +struct trace_event_data_offsets_ma_op {}; + +struct trace_event_data_offsets_ma_read {}; + +struct trace_event_data_offsets_ma_write {}; + +struct radix_tree_preload { + local_lock_t lock; + unsigned int nr; + struct xa_node *nodes; +}; + +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +struct printf_spec { + unsigned int type : 8; + int field_width : 24; + unsigned int flags : 8; + unsigned int base : 8; + int precision : 16; +}; + +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +enum { + st_wordstart = 0, + st_wordcmp = 1, + st_wordskip = 2, +}; + +enum { + st_wordstart___2 = 0, + st_wordcmp___2 = 1, + st_wordskip___2 = 2, + st_bufcpy = 3, +}; + +enum reg_type { + REG_TYPE_RM = 0, + REG_TYPE_REG = 1, + REG_TYPE_INDEX = 2, + REG_TYPE_BASE = 3, +}; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/tracer/src/main.rs b/tracer/src/main.rs new file mode 100644 index 0000000000..3a9fe1af47 --- /dev/null +++ b/tracer/src/main.rs @@ -0,0 +1,73 @@ +use libbpf_rs::skel::{OpenSkel, Skel, SkelBuilder}; +use libbpf_rs::{PerfBufferBuilder, UprobeOpts}; +use std::os::unix::ffi::OsStrExt; +use object::read::elf::{Dyn, FileHeader}; + +mod uprobe { + include!(concat!(env!("OUT_DIR"), "/uprobe.skel.rs")); +} +use object::Object; +use uprobe::*; +fn handle_lost_events(cpu: i32, count: u64) { + eprintln!("Lost {count} events on CPU {cpu}"); +} +fn handle_event(cpu: i32, data: &[u8]) { + eprintln!("Got {} bytes of data on {cpu}", data.len()); +} +fn main() { + 
    let mut builder = UprobeSkelBuilder::default();
+    // The path to the picom binary to trace is taken from the first CLI argument.
+    let picom_path = std::env::args().nth(1).unwrap();
+    let data = std::fs::read(&picom_path).unwrap();
+    // Parse picom's ELF dynamic section to find DT_RUNPATH and the DT_NEEDED entry
+    // for libc, so the epoll_wait uprobe below is attached to the same libc that
+    // picom itself will load.
+    let file = object::read::elf::ElfFile64::<'_, object::NativeEndian, _>::parse(&*data).unwrap();
+    let header = file.raw_header();
+    let sections = header.sections(object::NativeEndian, &*data).unwrap();
+    let (dynamic, dynamic_index) = sections.dynamic(object::NativeEndian, &*data).unwrap().unwrap();
+    let strings = sections.strings(object::NativeEndian, &*data, dynamic_index).unwrap();
+    let mut runpath = None;
+    let mut libc_name = None;
+    for d in dynamic {
+        if d.is_string(object::NativeEndian) {
+            let s = d.string(object::NativeEndian, strings).unwrap();
+            let tag = d.d_tag(object::NativeEndian);
+            if tag == object::elf::DT_RUNPATH as u64 {
+                runpath = Some(s.to_vec());
+            }
+            if tag == object::elf::DT_NEEDED as u64 && s.starts_with(b"libc.so") {
+                libc_name = Some(s.to_vec())
+            }
+            eprintln!("{} {}", tag, String::from_utf8_lossy(s));
+        }
+    }
+    // Resolve the libc file name against each RUNPATH entry to get a full path.
+    let runpath = runpath.unwrap();
+    let libc_name = libc_name.unwrap();
+    let libc_name = std::ffi::OsStr::from_bytes(&libc_name);
+    let mut libc_path = None;
+    for p in runpath.split(|ch| *ch == b':') {
+        let p = std::ffi::OsStr::from_bytes(p);
+        let p = std::path::Path::new(p);
+        let p = p.join(libc_name);
+        if p.exists() {
+            libc_path = Some(p);
+        }
+    }
+    let libc_path = libc_path.unwrap();
+    eprintln!("{}", libc_path.to_string_lossy());
+    builder.obj_builder.debug(true);
+
+    // Load the generated skeleton and attach both uprobes.
+    let open_skel = builder.open().unwrap();
+    let mut skel = open_skel.load().unwrap();
+    let obj = skel.object_mut();
+    // xcb_connection_probe is the no-op marker function compiled into picom.
+    let xcb_probe = obj.prog_mut("uprobe_xcb_conn").unwrap();
+    let _link = xcb_probe.attach_uprobe_with_opts(-1, &picom_path, 0, UprobeOpts {
+        retprobe: false,
+        func_name: "xcb_connection_probe".to_string(),
+        ..Default::default()
+    }).unwrap();
+    let epoll_probe = obj.prog_mut("uprobe_epoll_wait").unwrap();
+    let _link2 = epoll_probe.attach_uprobe_with_opts(-1, libc_path, 0, UprobeOpts {
+        retprobe: false,
+        func_name: "epoll_wait".to_string(),
+        ..Default::default()
+    }).unwrap();
+    // Keep the process alive so the uprobe links stay attached.
+    std::thread::park();
+}
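
The skeleton pulled in through uprobe.skel.rs is generated from a BPF object that is not part of this hunk. Purely as an illustration of what the Rust side above expects, a minimal BPF-side sketch might look like the following; the program names uprobe_xcb_conn and uprobe_epoll_wait match the prog_mut() lookups above, while the "events" map and the u64 timestamp payload are assumptions, not the actual tracer BPF source.

// Illustrative sketch only: the real tracer BPF program is not shown in this hunk.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

// Assumed perf event array for handing events to user space.
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

static __always_inline int emit(void *ctx)
{
	u64 ts = bpf_ktime_get_ns();
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &ts, sizeof(ts));
	return 0;
}

// Fires each time picom calls xcb_connection_probe().
SEC("uprobe")
int uprobe_xcb_conn(struct pt_regs *ctx)
{
	return emit(ctx);
}

// Fires each time picom enters epoll_wait() in libc.
SEC("uprobe")
int uprobe_epoll_wait(struct pt_regs *ctx)
{
	return emit(ctx);
}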