
Merge from rustc

The Miri Conjob Bot 2023-08-31 05:40:49 +00:00
commit 31d9ac14f5
2064 changed files with 19188 additions and 11173 deletions

View file

@ -107,6 +107,15 @@ dependencies = [
"yansi-term", "yansi-term",
] ]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]] [[package]]
name = "anstream" name = "anstream"
version = "0.3.2" version = "0.3.2"
@ -143,7 +152,7 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [ dependencies = [
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -153,7 +162,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [ dependencies = [
"anstyle", "anstyle",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -502,7 +511,7 @@ checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]] [[package]]
name = "clippy" name = "clippy"
version = "0.1.73" version = "0.1.74"
dependencies = [ dependencies = [
"clippy_lints", "clippy_lints",
"clippy_utils", "clippy_utils",
@ -522,7 +531,7 @@ dependencies = [
"tester", "tester",
"tokio", "tokio",
"toml 0.7.5", "toml 0.7.5",
"ui_test", "ui_test 0.18.1",
"walkdir", "walkdir",
] ]
@ -541,7 +550,7 @@ dependencies = [
[[package]] [[package]]
name = "clippy_lints" name = "clippy_lints"
version = "0.1.73" version = "0.1.74"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"cargo_metadata", "cargo_metadata",
@ -566,7 +575,7 @@ dependencies = [
[[package]] [[package]]
name = "clippy_utils" name = "clippy_utils"
version = "0.1.73" version = "0.1.74"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"if_chain", "if_chain",
@ -628,6 +637,12 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "comma"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]] [[package]]
name = "compiler_builtins" name = "compiler_builtins"
version = "0.1.100" version = "0.1.100"
@ -664,6 +679,19 @@ dependencies = [
"windows", "windows",
] ]
[[package]]
name = "console"
version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"unicode-width",
"windows-sys 0.45.0",
]
[[package]] [[package]]
name = "convert_case" name = "convert_case"
version = "0.4.0" version = "0.4.0"
@ -786,7 +814,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e"
dependencies = [ dependencies = [
"nix", "nix",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -862,7 +890,7 @@ checksum = "a0afaad2b26fa326569eb264b1363e8ae3357618c43982b3f285f0774ce76b69"
[[package]] [[package]]
name = "declare_clippy_lint" name = "declare_clippy_lint"
version = "0.1.73" version = "0.1.74"
dependencies = [ dependencies = [
"itertools", "itertools",
"quote", "quote",
@ -1035,6 +1063,12 @@ dependencies = [
"log", "log",
] ]
[[package]]
name = "encode_unicode"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]] [[package]]
name = "encoding_rs" name = "encoding_rs"
version = "0.8.32" version = "0.8.32"
@ -1097,7 +1131,7 @@ checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
dependencies = [ dependencies = [
"errno-dragonfly", "errno-dragonfly",
"libc", "libc",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -1180,7 +1214,7 @@ dependencies = [
"cfg-if", "cfg-if",
"libc", "libc",
"redox_syscall 0.2.16", "redox_syscall 0.2.16",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -1824,6 +1858,19 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "indicatif"
version = "0.17.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730"
dependencies = [
"console",
"instant",
"number_prefix",
"portable-atomic",
"unicode-width",
]
[[package]] [[package]]
name = "indoc" name = "indoc"
version = "1.0.9" version = "1.0.9"
@ -1880,7 +1927,7 @@ checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [ dependencies = [
"hermit-abi 0.3.2", "hermit-abi 0.3.2",
"libc", "libc",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -1897,7 +1944,7 @@ checksum = "24fddda5af7e54bf7da53067d6e802dbcc381d0a8eef629df528e3ebf68755cb"
dependencies = [ dependencies = [
"hermit-abi 0.3.2", "hermit-abi 0.3.2",
"rustix 0.38.2", "rustix 0.38.2",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -1994,6 +2041,12 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "levenshtein"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760"
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.147" version = "0.2.147"
@ -2290,7 +2343,7 @@ checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
dependencies = [ dependencies = [
"libc", "libc",
"wasi", "wasi",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -2299,7 +2352,7 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "359f76430b20a79f9e20e115b3428614e654f04fab314482fc0fda0ebd3c6044" checksum = "359f76430b20a79f9e20e115b3428614e654f04fab314482fc0fda0ebd3c6044"
dependencies = [ dependencies = [
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -2321,7 +2374,7 @@ dependencies = [
"rustc_version", "rustc_version",
"serde", "serde",
"smallvec", "smallvec",
"ui_test", "ui_test 0.11.7",
] ]
[[package]] [[package]]
@ -2415,6 +2468,12 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]] [[package]]
name = "object" name = "object"
version = "0.32.0" version = "0.32.0"
@ -2546,6 +2605,15 @@ dependencies = [
"libm 0.1.4", "libm 0.1.4",
] ]
[[package]]
name = "pad"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3"
dependencies = [
"unicode-width",
]
[[package]] [[package]]
name = "panic_abort" name = "panic_abort"
version = "0.0.0" version = "0.0.0"
@ -2614,7 +2682,7 @@ dependencies = [
"libc", "libc",
"redox_syscall 0.3.5", "redox_syscall 0.3.5",
"smallvec", "smallvec",
"windows-targets", "windows-targets 0.48.1",
] ]
[[package]] [[package]]
@ -2749,6 +2817,12 @@ dependencies = [
"rustc-hash", "rustc-hash",
] ]
[[package]]
name = "portable-atomic"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e"
[[package]] [[package]]
name = "ppv-lite86" name = "ppv-lite86"
version = "0.2.17" version = "0.2.17"
@ -2761,6 +2835,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
[[package]]
name = "prettydiff"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ff1fec61082821f8236cf6c0c14e8172b62ce8a72a0eedc30d3b247bb68dc11"
dependencies = [
"ansi_term",
"pad",
]
[[package]] [[package]]
name = "proc-macro-hack" name = "proc-macro-hack"
version = "0.5.20+deprecated" version = "0.5.20+deprecated"
@ -4251,8 +4335,11 @@ dependencies = [
name = "rustc_smir" name = "rustc_smir"
version = "0.0.0" version = "0.0.0"
dependencies = [ dependencies = [
"rustc_driver",
"rustc_hir", "rustc_hir",
"rustc_interface",
"rustc_middle", "rustc_middle",
"rustc_session",
"rustc_span", "rustc_span",
"rustc_target", "rustc_target",
"scoped-tls", "scoped-tls",
@ -4534,7 +4621,7 @@ dependencies = [
"io-lifetimes", "io-lifetimes",
"libc", "libc",
"linux-raw-sys 0.3.8", "linux-raw-sys 0.3.8",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -4547,7 +4634,7 @@ dependencies = [
"errno", "errno",
"libc", "libc",
"linux-raw-sys 0.4.3", "linux-raw-sys 0.4.3",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -4588,7 +4675,7 @@ version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
dependencies = [ dependencies = [
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -5017,7 +5104,7 @@ dependencies = [
"fastrand", "fastrand",
"redox_syscall 0.3.5", "redox_syscall 0.3.5",
"rustix 0.37.22", "rustix 0.37.22",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -5058,7 +5145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237" checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
dependencies = [ dependencies = [
"rustix 0.37.22", "rustix 0.37.22",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -5257,7 +5344,7 @@ dependencies = [
"num_cpus", "num_cpus",
"pin-project-lite", "pin-project-lite",
"socket2", "socket2",
"windows-sys", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@ -5494,6 +5581,33 @@ dependencies = [
"tempfile", "tempfile",
] ]
[[package]]
name = "ui_test"
version = "0.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "640159421816683e558867ffc0e60ed3a3ed97ec6ccb22c03adb41bf87c5cfa4"
dependencies = [
"annotate-snippets",
"anyhow",
"bstr",
"cargo-platform",
"cargo_metadata",
"color-eyre",
"colored",
"comma",
"crossbeam-channel",
"indicatif",
"lazy_static",
"levenshtein",
"prettydiff",
"regex",
"rustc_version",
"rustfix",
"serde",
"serde_json",
"tempfile",
]
[[package]] [[package]]
name = "unic-langid" name = "unic-langid"
version = "0.9.1" version = "0.9.1"
@ -5846,7 +5960,7 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
dependencies = [ dependencies = [
"windows-targets", "windows-targets 0.48.1",
] ]
[[package]] [[package]]
@ -5865,13 +5979,37 @@ version = "0.49.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f5bca94a32bf1e6a376522b6601275a3b611ee885ec0f1b6a05f17e8cfd3385" checksum = "2f5bca94a32bf1e6a376522b6601275a3b611ee885ec0f1b6a05f17e8cfd3385"
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]] [[package]]
name = "windows-sys" name = "windows-sys"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [ dependencies = [
"windows-targets", "windows-targets 0.48.1",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
] ]
[[package]] [[package]]
@ -5880,13 +6018,13 @@ version = "0.48.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
dependencies = [ dependencies = [
"windows_aarch64_gnullvm", "windows_aarch64_gnullvm 0.48.0",
"windows_aarch64_msvc", "windows_aarch64_msvc 0.48.0",
"windows_i686_gnu", "windows_i686_gnu 0.48.0",
"windows_i686_msvc", "windows_i686_msvc 0.48.0",
"windows_x86_64_gnu", "windows_x86_64_gnu 0.48.0",
"windows_x86_64_gnullvm", "windows_x86_64_gnullvm 0.48.0",
"windows_x86_64_msvc", "windows_x86_64_msvc 0.48.0",
] ]
[[package]] [[package]]
@ -5895,42 +6033,84 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b34c9a3b28cb41db7385546f7f9a8179348dffc89923dde66857b1ba5312f6b4" checksum = "b34c9a3b28cb41db7385546f7f9a8179348dffc89923dde66857b1ba5312f6b4"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]] [[package]]
name = "windows_aarch64_gnullvm" name = "windows_aarch64_gnullvm"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]] [[package]]
name = "windows_aarch64_msvc" name = "windows_aarch64_msvc"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]] [[package]]
name = "windows_i686_gnu" name = "windows_i686_gnu"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]] [[package]]
name = "windows_i686_msvc" name = "windows_i686_msvc"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]] [[package]]
name = "windows_x86_64_gnu" name = "windows_x86_64_gnu"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]] [[package]]
name = "windows_x86_64_gnullvm" name = "windows_x86_64_gnullvm"
version = "0.48.0" version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]] [[package]]
name = "windows_x86_64_msvc" name = "windows_x86_64_msvc"
version = "0.48.0" version = "0.48.0"

View file

@@ -157,8 +157,10 @@ pub trait LayoutCalculator {
        // for non-ZST uninhabited data (mostly partial initialization).
        let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
            let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
-           let is_zst = fields.iter().all(|f| f.0.is_zst());
-           uninhabited && is_zst
+           // We cannot ignore alignment; that might lead us to entirely discard a variant and
+           // produce an enum that is less aligned than it should be!
+           let is_1zst = fields.iter().all(|f| f.0.is_1zst());
+           uninhabited && is_1zst
        };
        let (present_first, present_second) = {
            let mut present_variants = variants

@@ -357,10 +359,8 @@ pub trait LayoutCalculator {
            // It'll fit, but we need to make some adjustments.
            match layout.fields {
                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                   for (j, offset) in offsets.iter_enumerated_mut() {
-                       if !variants[i][j].0.is_zst() {
-                           *offset += this_offset;
-                       }
+                   for offset in offsets.iter_mut() {
+                       *offset += this_offset;
                    }
                }
                _ => {

@@ -504,7 +504,7 @@ pub trait LayoutCalculator {
            // to make room for a larger discriminant.
            for field_idx in st.fields.index_by_increasing_offset() {
                let field = &field_layouts[FieldIdx::from_usize(field_idx)];
-               if !field.0.is_zst() || field.align().abi.bytes() != 1 {
+               if !field.0.is_1zst() {
                    start_align = start_align.min(field.align().abi);
                    break;
                }

@@ -603,12 +603,15 @@ pub trait LayoutCalculator {
            abi = Abi::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
+           // That's possible only if we can find a common primitive type for all variants.
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!();
                };
+               // We skip *all* ZST here and later check if we are good in terms of alignment.
+               // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {

@@ -954,9 +957,6 @@ fn univariant(
        };
        (
-           // Place ZSTs first to avoid "interesting offsets", especially with only one
-           // or two non-ZST fields. This helps Scalar/ScalarPair layouts.
-           !f.0.is_zst(),
            // Then place largest alignments first.
            cmp::Reverse(alignment_group_key(f)),
            // Then prioritize niche placement within alignment group according to

@@ -1073,9 +1073,10 @@ fn univariant(
    let size = min_size.align_to(align.abi);
    let mut layout_of_single_non_zst_field = None;
    let mut abi = Abi::Aggregate { sized };
-   // Unpack newtype ABIs and find scalar pairs.
+   // Try to make this a Scalar/ScalarPair.
    if sized && size.bytes() > 0 {
-       // All other fields must be ZSTs.
+       // We skip *all* ZST here and later check if we are good in terms of alignment.
+       // This lets us handle some cases involving aligned ZST.
        let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {

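For illustration (not part of the diff): a hypothetical enum showing why the `absent` check above must look at 1-ZSTs rather than ZSTs. The uninhabited variant carries an aligned ZST, and discarding that variant entirely would under-align the enum.

    use std::mem::align_of;

    enum Void {}

    enum E {
        A(u8),
        // Uninhabited (because of `Void`) and zero-sized, but the `[u64; 0]`
        // field still demands 8-byte alignment.
        B([u64; 0], Void),
    }

    fn main() {
        // With the rule above, the uninhabited variant still contributes its
        // alignment, so this should print 8.
        println!("{}", align_of::<E>());
    }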
View file

@@ -1660,15 +1660,25 @@ pub struct PointeeInfo {
impl LayoutS {
    /// Returns `true` if the layout corresponds to an unsized type.
+   #[inline]
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

+   #[inline]
    pub fn is_sized(&self) -> bool {
        self.abi.is_sized()
    }

+   /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
+   pub fn is_1zst(&self) -> bool {
+       self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
+   }
+
    /// Returns `true` if the type is a ZST and not unsized.
+   ///
+   /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
+   /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,

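A user-level sketch (not part of the diff) of the distinction the new doc comments draw: every 1-ZST is a ZST, but a ZST can still impose alignment.

    use std::mem::{align_of, size_of};

    fn main() {
        // `()` is a 1-ZST: size 0, alignment 1.
        assert_eq!((size_of::<()>(), align_of::<()>()), (0, 1));
        // `[u64; 0]` is a ZST but *not* a 1-ZST: size 0, alignment 8.
        assert_eq!((size_of::<[u64; 0]>(), align_of::<[u64; 0]>()), (0, 8));
    }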
View file

@@ -480,7 +480,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
            self.0.sess.span_fatal(span, err.to_string())
        } else {
-           span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+           self.0.sess.span_fatal(span, format!("failed to get layout for `{}`: {}", ty, err))
        }
    }
}

View file

@@ -88,7 +88,8 @@ fn unsize_ptr<'tcx>(
        let src_f = src_layout.field(fx, i);
        assert_eq!(src_layout.fields.offset(i).bytes(), 0);
        assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
-       if src_f.is_zst() {
+       if src_f.is_1zst() {
+           // We are looking for the one non-1-ZST field; this is not it.
            continue;
        }
        assert_eq!(src_layout.size, src_f.size);

@@ -151,6 +152,7 @@ pub(crate) fn coerce_unsized_into<'tcx>(
        let dst_f = dst.place_field(fx, FieldIdx::new(i));
        if dst_f.layout().is_zst() {
+           // No data here, nothing to copy/coerce.
            continue;
        }

View file

@@ -51,8 +51,8 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
    'descend_newtypes: while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
        for i in 0..arg.layout().fields.count() {
            let field = arg.value_field(fx, FieldIdx::new(i));
-           if !field.layout().is_zst() {
-               // we found the one non-zero-sized field that is allowed
+           if !field.layout().is_1zst() {
+               // we found the one non-1-ZST field that is allowed
                // now find *its* non-zero-sized field, or stop if it's a
                // pointer
                arg = field;

View file

@@ -7,6 +7,7 @@ use rustc_codegen_ssa::traits::{
    BaseTypeMethods,
    MiscMethods,
};
+use rustc_codegen_ssa::errors as ssa_errors;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::span_bug;

@@ -479,7 +480,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
            self.sess().emit_fatal(respan(span, err.into_diagnostic()))
        } else {
-           span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+           self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
        }
    }
}

View file

@@ -10,6 +10,7 @@ use crate::value::Value;
use cstr::cstr;
use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;

@@ -1000,7 +1001,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
            self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
        } else {
-           span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
+           self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
        }
    }
}

View file

@ -16,7 +16,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_llvm::RustString; use rustc_llvm::RustString;
use rustc_middle::bug; use rustc_middle::bug;
use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand}; use rustc_middle::mir::coverage::{CounterId, CoverageKind};
use rustc_middle::mir::Coverage; use rustc_middle::mir::Coverage;
use rustc_middle::ty; use rustc_middle::ty;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
@ -104,144 +104,67 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) { fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
let bx = self; let bx = self;
let Some(coverage_context) = bx.coverage_context() else { return };
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
let func_coverage = coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
let Coverage { kind, code_region } = coverage.clone(); let Coverage { kind, code_region } = coverage.clone();
match kind { match kind {
CoverageKind::Counter { function_source_hash, id } => { CoverageKind::Counter { function_source_hash, id } => {
if bx.set_function_source_hash(instance, function_source_hash) { debug!(
// If `set_function_source_hash()` returned true, the coverage map is enabled, "ensuring function source hash is set for instance={:?}; function_source_hash={}",
// so continue adding the counter. instance, function_source_hash,
if let Some(code_region) = code_region { );
// Note: Some counters do not have code regions, but may still be referenced func_coverage.set_function_source_hash(function_source_hash);
// from expressions. In that case, don't add the counter to the coverage map,
// but do inject the counter intrinsic.
bx.add_coverage_counter(instance, id, code_region);
}
let coverageinfo = bx.tcx().coverageinfo(instance.def); if let Some(code_region) = code_region {
// Note: Some counters do not have code regions, but may still be referenced
let fn_name = bx.get_pgo_func_name_var(instance); // from expressions. In that case, don't add the counter to the coverage map,
let hash = bx.const_u64(function_source_hash); // but do inject the counter intrinsic.
let num_counters = bx.const_u32(coverageinfo.num_counters);
let index = bx.const_u32(id.as_u32());
debug!( debug!(
"codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})", "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
fn_name, hash, num_counters, index, instance, id, code_region,
); );
bx.instrprof_increment(fn_name, hash, num_counters, index); func_coverage.add_counter(id, code_region);
} }
// We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
// as that needs an exclusive borrow.
drop(coverage_map);
let coverageinfo = bx.tcx().coverageinfo(instance.def);
let fn_name = bx.get_pgo_func_name_var(instance);
let hash = bx.const_u64(function_source_hash);
let num_counters = bx.const_u32(coverageinfo.num_counters);
let index = bx.const_u32(id.as_u32());
debug!(
"codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
fn_name, hash, num_counters, index,
);
bx.instrprof_increment(fn_name, hash, num_counters, index);
} }
CoverageKind::Expression { id, lhs, op, rhs } => { CoverageKind::Expression { id, lhs, op, rhs } => {
bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region); debug!(
"adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
instance, id, lhs, op, rhs, code_region,
);
func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
} }
CoverageKind::Unreachable => { CoverageKind::Unreachable => {
bx.add_coverage_unreachable( let code_region =
instance, code_region.expect("unreachable regions always have code regions");
code_region.expect("unreachable regions always have code regions"), debug!(
"adding unreachable code to coverage_map: instance={:?}, at {:?}",
instance, code_region,
); );
func_coverage.add_unreachable_region(code_region);
} }
} }
} }
} }
// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
// after moving most coverage code out of SSA they are now just ordinary methods.
impl<'tcx> Builder<'_, '_, 'tcx> {
/// Returns true if the function source hash was added to the coverage map (even if it had
/// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
/// not enabled (a coverage map is not being generated).
fn set_function_source_hash(
&mut self,
instance: Instance<'tcx>,
function_source_hash: u64,
) -> bool {
if let Some(coverage_context) = self.coverage_context() {
debug!(
"ensuring function source hash is set for instance={:?}; function_source_hash={}",
instance, function_source_hash,
);
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.set_function_source_hash(function_source_hash);
true
} else {
false
}
}
/// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
/// is not enabled (a coverage map is not being generated).
fn add_coverage_counter(
&mut self,
instance: Instance<'tcx>,
id: CounterId,
region: CodeRegion,
) -> bool {
if let Some(coverage_context) = self.coverage_context() {
debug!(
"adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
instance, id, region,
);
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_counter(id, region);
true
} else {
false
}
}
/// Returns true if the expression was added to the coverage map; false if
/// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
fn add_coverage_counter_expression(
&mut self,
instance: Instance<'tcx>,
id: ExpressionId,
lhs: Operand,
op: Op,
rhs: Operand,
region: Option<CodeRegion>,
) -> bool {
if let Some(coverage_context) = self.coverage_context() {
debug!(
"adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
region: {:?}",
instance, id, lhs, op, rhs, region,
);
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_counter_expression(id, lhs, op, rhs, region);
true
} else {
false
}
}
/// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
/// is not enabled (a coverage map is not being generated).
fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
if let Some(coverage_context) = self.coverage_context() {
debug!(
"adding unreachable code to coverage_map: instance={:?}, at {:?}",
instance, region,
);
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_unreachable_region(region);
true
} else {
false
}
}
}
fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> { fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
let tcx = cx.tcx; let tcx = cx.tcx;

View file

@@ -445,9 +445,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
        ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
            build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
        }
-       // Box<T, A> may have a non-ZST allocator A. In that case, we
+       // Box<T, A> may have a non-1-ZST allocator A. In that case, we
        // cannot treat Box<T, A> as just an owned alias of `*mut T`.
-       ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
+       ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => {
            build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
        }
        ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),

View file

@@ -35,6 +35,8 @@ codegen_ssa_extract_bundled_libs_parse_archive = failed to parse archive '{$rlib
codegen_ssa_extract_bundled_libs_read_entry = failed to read entry '{$rlib}': {$error}
codegen_ssa_extract_bundled_libs_write_file = failed to write file '{$rlib}': {$error}

+codegen_ssa_failed_to_get_layout = failed to get layout for {$ty}: {$err}
+
codegen_ssa_failed_to_write = failed to write {$path}: {$error}
codegen_ssa_ignoring_emit_path = ignoring emit path because multiple .{$extension} files were produced

View file

@@ -226,9 +226,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
    let mut file = write::Object::new(binary_format, architecture, endianness);
    if sess.target.is_like_osx {
-       if let Some(build_version) = macho_object_build_version_for_target(&sess.target) {
-           file.set_macho_build_version(build_version)
-       }
+       file.set_macho_build_version(macho_object_build_version_for_target(&sess.target))
    }
    let e_flags = match architecture {
        Architecture::Mips => {

@@ -334,31 +332,28 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
    Some(file)
}

-/// Apple's LD, when linking for Mac Catalyst, requires object files to
-/// contain information about what they were built for (LC_BUILD_VERSION):
-/// the platform (macOS/watchOS etc), minimum OS version, and SDK version.
-/// This returns a `MachOBuildVersion` if necessary for the target.
-fn macho_object_build_version_for_target(
-    target: &Target,
-) -> Option<object::write::MachOBuildVersion> {
-    if !target.llvm_target.ends_with("-macabi") {
-        return None;
-    }
+/// Since Xcode 15 Apple's LD requires object files to contain information about what they were
+/// built for (LC_BUILD_VERSION): the platform (macOS/watchOS etc), minimum OS version, and SDK
+/// version. This returns a `MachOBuildVersion` for the target.
+fn macho_object_build_version_for_target(target: &Target) -> object::write::MachOBuildVersion {
    /// The `object` crate demands "X.Y.Z encoded in nibbles as xxxx.yy.zz"
    /// e.g. minOS 14.0 = 0x000E0000, or SDK 16.2 = 0x00100200
    fn pack_version((major, minor): (u32, u32)) -> u32 {
        (major << 16) | (minor << 8)
    }

-   let platform = object::macho::PLATFORM_MACCATALYST;
-   let min_os = (14, 0);
-   let sdk = (16, 2);
+   let platform =
+       rustc_target::spec::current_apple_platform(target).expect("unknown Apple target OS");
+   let min_os = rustc_target::spec::current_apple_deployment_target(target)
+       .expect("unknown Apple target OS");
+   let sdk =
+       rustc_target::spec::current_apple_sdk_version(platform).expect("unknown Apple target OS");

    let mut build_version = object::write::MachOBuildVersion::default();
    build_version.platform = platform;
    build_version.minos = pack_version(min_os);
    build_version.sdk = pack_version(sdk);
-   Some(build_version)
+   build_version
}

pub enum MetadataPosition {

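As a standalone check of the nibble packing described in the doc comment above (the helper is reproduced here only for illustration):

    fn pack_version((major, minor): (u32, u32)) -> u32 {
        (major << 16) | (minor << 8)
    }

    fn main() {
        assert_eq!(pack_version((14, 0)), 0x000E_0000); // minOS 14.0
        assert_eq!(pack_version((16, 2)), 0x0010_0200); // SDK 16.2
    }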
View file

@@ -202,7 +202,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
            (src, unsized_info(bx, a, b, old_info))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-           assert_eq!(def_a, def_b);
+           assert_eq!(def_a, def_b); // implies same number of fields
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            if src_ty == dst_ty {

@@ -211,7 +211,8 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
-               if src_f.is_zst() {
+               if src_f.is_1zst() {
+                   // We are looking for the one non-1-ZST field; this is not it.
                    continue;
                }

@@ -272,13 +273,14 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-           assert_eq!(def_a, def_b);
+           assert_eq!(def_a, def_b); // implies same number of fields
            for i in def_a.variant(FIRST_VARIANT).fields.indices() {
                let src_f = src.project_field(bx, i.as_usize());
                let dst_f = dst.project_field(bx, i.as_usize());
                if dst_f.layout.is_zst() {
+                   // No data here, nothing to copy/coerce.
                    continue;
                }

View file

@@ -7,6 +7,7 @@ use rustc_errors::{
    IntoDiagnosticArg,
};
use rustc_macros::Diagnostic;
+use rustc_middle::ty::layout::LayoutError;
use rustc_middle::ty::Ty;
use rustc_span::{Span, Symbol};
use rustc_type_ir::FloatTy;

@@ -1030,6 +1031,15 @@ pub struct TargetFeatureSafeTrait {
    pub def: Span,
}

+#[derive(Diagnostic)]
+#[diag(codegen_ssa_failed_to_get_layout)]
+pub struct FailedToGetLayout<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub err: LayoutError<'tcx>,
+}
+
#[derive(Diagnostic)]
#[diag(codegen_ssa_error_creating_remark_dir)]
pub struct ErrorCreatingRemarkDir {

View file

@@ -933,8 +933,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        {
            for i in 0..op.layout.fields.count() {
                let field = op.extract_field(bx, i);
-               if !field.layout.is_zst() {
-                   // we found the one non-zero-sized field that is allowed
+               if !field.layout.is_1zst() {
+                   // we found the one non-1-ZST field that is allowed
                    // now find *its* non-zero-sized field, or stop if it's a
                    // pointer
                    op = field;

@@ -975,10 +975,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        {
            for i in 0..op.layout.fields.count() {
                let field = op.extract_field(bx, i);
-               if !field.layout.is_zst() {
-                   // we found the one non-zero-sized field that is allowed
-                   // now find *its* non-zero-sized field, or stop if it's a
-                   // pointer
+               if !field.layout.is_1zst() {
+                   // We found the one non-1-ZST field that is allowed. (The rules
+                   // for `DispatchFromDyn` ensure there's exactly one such field.)
+                   // Now find *its* non-zero-sized field, or stop if it's a
+                   // pointer.
                    op = field;
                    continue 'descend_newtypes;
                }

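A sketch (not from the diff; `Wrapper` is a hypothetical name) of the receiver shape this descent relies on: exactly one non-1-ZST field, everything else a 1-ZST that can be skipped.

    use std::marker::PhantomData;
    use std::mem::size_of;

    struct Wrapper<'a, T: ?Sized> {
        _marker: PhantomData<&'a ()>, // 1-ZST, skipped by the descent
        inner: &'a T,                 // the one non-1-ZST field
    }

    fn main() {
        // The 1-ZST field adds neither size nor alignment.
        assert_eq!(size_of::<Wrapper<'static, u32>>(), size_of::<&'static u32>());
    }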
View file

@@ -145,7 +145,7 @@ impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
-           // we need something in the operand.
+           // we need something sufficiently aligned in the operand.
            LocalRef::Operand(OperandRef::zero_sized(layout))
        } else {
            LocalRef::PendingOperand

View file

@@ -50,7 +50,8 @@ pub enum OperandValue<V> {
    /// from [`ConstMethods::const_poison`].
    ///
    /// An `OperandValue` *must* be this variant for any type for which
-   /// `is_zst` on its `Layout` returns `true`.
+   /// `is_zst` on its `Layout` returns `true`. Note however that
+   /// these values can still require alignment.
    ZeroSized,
} }

View file

@@ -114,7 +114,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
            bx.struct_gep(ty, self.llval, 1)
        }
        Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
-           // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+           // ZST fields (even some that require alignment) are not included in Scalar,
+           // ScalarPair, and Vector layouts, so manually offset the pointer.
            bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
        }
        Abi::Scalar(_) | Abi::ScalarPair(..) => {

View file

@@ -1004,6 +1004,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
+               // For ZST this can be `OperandValueKind::ZeroSized`.
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

View file

@@ -384,7 +384,7 @@ const_eval_unreachable_unwind =
const_eval_unsigned_offset_from_overflow =
    `ptr_offset_from_unsigned` called when first pointer has smaller offset than second: {$a_offset} < {$b_offset}

+const_eval_unsized_local = unsized locals are not supported
+
const_eval_unstable_const_fn = `{$def_path}` is not yet stable as a const fn

const_eval_unstable_in_stable =

View file

@@ -61,6 +61,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
        &ret.clone().into(),
        StackPopCleanup::Root { cleanup: false },
    )?;
+   ecx.storage_live_for_always_live_locals()?;

    // The main interpreter loop.
    while ecx.step()? {}

View file

@@ -795,6 +795,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
        use crate::fluent_generated::*;
        match self {
            UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
+           UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
            UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
            UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
            UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,

@@ -814,7 +815,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
            // `ReadPointerAsInt(Some(info))` is never printed anyway, it only serves as an error to
            // be further processed by validity checking which then turns it into something nice to
            // print. So it's not worth the effort of having diagnostics that can print the `info`.
-           Unsupported(_) | ReadPointerAsInt(_) => {}
+           UnsizedLocal | Unsupported(_) | ReadPointerAsInt(_) => {}
            OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
                builder.set_arg("ptr", ptr);
            }

View file

@@ -410,21 +410,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                self.unsize_into_ptr(src, dest, *s, *c)
            }
            (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-               assert_eq!(def_a, def_b);
-               // unsizing of generic struct with pointer fields
-               // Example: `Arc<T>` -> `Arc<Trait>`
-               // here we need to increase the size of every &T thin ptr field to a fat ptr
+               assert_eq!(def_a, def_b); // implies same number of fields
+               // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`.
+               // There can be extra fields as long as they don't change their type or are 1-ZST.
+               // There might also be no field that actually needs unsizing.
+               let mut found_cast_field = false;
                for i in 0..src.layout.fields.count() {
                    let cast_ty_field = cast_ty.field(self, i);
-                   if cast_ty_field.is_zst() {
-                       continue;
-                   }
                    let src_field = self.project_field(src, i)?;
                    let dst_field = self.project_field(dest, i)?;
-                   if src_field.layout.ty == cast_ty_field.ty {
+                   if src_field.layout.is_1zst() && cast_ty_field.is_1zst() {
+                       // Skip 1-ZST fields.
+                   } else if src_field.layout.ty == cast_ty_field.ty {
                        self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
                    } else {
+                       if found_cast_field {
+                           span_bug!(self.cur_span(), "unsize_into: more than one field to cast");
+                       }
+                       found_cast_field = true;
                        self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
                    }
                }

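A user-visible instance of the struct unsizing described in the comments above, using the standard library's `Arc` (only the pointer field changes from thin to wide):

    use std::sync::Arc;

    fn main() {
        let a: Arc<[u8; 4]> = Arc::new([1, 2, 3, 4]);
        let b: Arc<[u8]> = a; // unsize coercion of a generic struct with a pointer field
        assert_eq!(b.len(), 4);
    }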
View file

@ -25,8 +25,8 @@ use super::{
Scalar, StackPopJump, Scalar, StackPopJump,
}; };
use crate::errors::{self, ErroneousConstUsed}; use crate::errors::{self, ErroneousConstUsed};
use crate::fluent_generated as fluent;
use crate::util; use crate::util;
use crate::{fluent_generated as fluent, ReportErrorExt};
pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> { pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance. /// Stores the `Machine` instance.
@ -158,7 +158,8 @@ pub enum StackPopCleanup {
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct LocalState<'tcx, Prov: Provenance = AllocId> { pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
pub value: LocalValue<Prov>, pub value: LocalValue<Prov>,
/// Don't modify if `Some`, this is only used to prevent computing the layout twice /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
/// Avoids computing the layout of locals that are never actually initialized.
pub layout: Cell<Option<TyAndLayout<'tcx>>>, pub layout: Cell<Option<TyAndLayout<'tcx>>>,
} }
@ -177,7 +178,7 @@ pub enum LocalValue<Prov: Provenance = AllocId> {
impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> { impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
/// Read the local's value or error if the local is not yet live or not live anymore. /// Read the local's value or error if the local is not yet live or not live anymore.
#[inline] #[inline(always)]
pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> { pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value { match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"? LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
@ -190,7 +191,7 @@ impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
/// ///
/// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
/// anywhere else. You may be invalidating machine invariants if you do! /// anywhere else. You may be invalidating machine invariants if you do!
#[inline] #[inline(always)]
pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> { pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value { match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"? LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
@ -432,6 +433,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id)) .map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id))
} }
/// Turn the given error into a human-readable string. Expects the string to be printed, so if
/// `RUSTC_CTFE_BACKTRACE` is set this will show a backtrace of the rustc internals that
/// triggered the error.
///
/// This is NOT the preferred way to render an error; use `report` from `const_eval` instead.
/// However, this is useful when error messages appear in ICEs.
pub fn format_error(&self, e: InterpErrorInfo<'tcx>) -> String {
let (e, backtrace) = e.into_parts();
backtrace.print_backtrace();
// FIXME(fee1-dead), HACK: we want to use the error as title therefore we can just extract the
// label and arguments from the InterpError.
let handler = &self.tcx.sess.parse_sess.span_diagnostic;
#[allow(rustc::untranslatable_diagnostic)]
let mut diag = self.tcx.sess.struct_allow("");
let msg = e.diagnostic_message();
e.add_args(handler, &mut diag);
let s = handler.eagerly_translate_to_string(msg, diag.args());
diag.cancel();
s
}
#[inline(always)] #[inline(always)]
pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] { pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
M::stack(self) M::stack(self)
@ -462,7 +484,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
#[inline(always)] #[inline(always)]
pub(super) fn body(&self) -> &'mir mir::Body<'tcx> { pub fn body(&self) -> &'mir mir::Body<'tcx> {
self.frame().body self.frame().body
} }
@ -684,15 +706,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return_to_block: StackPopCleanup, return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
trace!("body: {:#?}", body); trace!("body: {:#?}", body);
let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
let locals = IndexVec::from_elem(dead_local, &body.local_decls);
// First push a stack frame so we have access to the local args // First push a stack frame so we have access to the local args
let pre_frame = Frame { let pre_frame = Frame {
body, body,
loc: Right(body.span), // Span used for errors caused during preamble. loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block, return_to_block,
return_place: return_place.clone(), return_place: return_place.clone(),
// empty local array, we fill it in below, after we are inside the stack frame and locals,
// all methods actually know about the frame
locals: IndexVec::new(),
instance, instance,
tracing_span: SpanGuard::new(), tracing_span: SpanGuard::new(),
extra: (), extra: (),
@ -707,19 +729,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.eval_mir_constant(&ct, Some(span), None)?; self.eval_mir_constant(&ct, Some(span), None)?;
} }
// Most locals are initially dead.
let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
// Now mark those locals as live that have no `Storage*` annotations.
let always_live = always_storage_live_locals(self.body());
for local in locals.indices() {
if always_live.contains(local) {
locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
}
}
// done // done
self.frame_mut().locals = locals;
M::after_stack_push(self)?; M::after_stack_push(self)?;
self.frame_mut().loc = Left(mir::Location::START); self.frame_mut().loc = Left(mir::Location::START);
@ -886,12 +896,96 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
} }
/// Mark a storage as live, killing the previous content. /// In the current stack frame, mark all locals as live that are not arguments and don't have
pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> { /// `Storage*` annotations (this includes the return place).
assert!(local != mir::RETURN_PLACE, "Cannot make return place live"); pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
self.storage_live(mir::RETURN_PLACE)?;
let body = self.body();
let always_live = always_storage_live_locals(body);
for local in body.vars_and_temps_iter() {
if always_live.contains(local) {
self.storage_live(local)?;
}
}
Ok(())
}
pub fn storage_live_dyn(
&mut self,
local: mir::Local,
meta: MemPlaceMeta<M::Provenance>,
) -> InterpResult<'tcx> {
trace!("{:?} is now live", local); trace!("{:?} is now live", local);
let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit)); // We avoid `ty.is_trivially_sized` since that (a) cannot assume WF, so it recurses through
// all fields of a tuple, and (b) does something expensive for ADTs.
fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
match ty.kind() {
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::GeneratorWitnessMIR(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
| ty::Error(_) => true,
ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
ty::Tuple(tys) => tys.last().iter().all(|ty| is_very_trivially_sized(**ty)),
// We don't want to do any queries, so there is not much we can do with ADTs.
ty::Adt(..) => false,
ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
ty::Infer(ty::TyVar(_)) => false,
ty::Bound(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("`is_very_trivially_sized` applied to unexpected type: {:?}", ty)
}
}
}
// This is a hot function, we avoid computing the layout when possible.
// `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
None
} else {
// We need the layout.
let layout = self.layout_of_local(self.frame(), local, None)?;
if layout.is_sized() { None } else { Some(layout) }
};
let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
if !meta.has_meta() {
throw_unsup!(UnsizedLocal);
}
// Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
Operand::Indirect(*dest_place)
} else {
assert!(!meta.has_meta()); // we're dropping the metadata
// Just make this an efficient immediate.
// Note that not calling `layout_of` here does have one real consequence:
// if the type is too big, we'll only notice this when the local is actually initialized,
// which is a bit too late -- we should ideally notice this already here, when the memory
// is conceptually allocated. But given how rare that error is and that this is a hot function,
// we accept this downside for now.
Operand::Immediate(Immediate::Uninit)
});
// StorageLive expects the local to be dead, and marks it live. // StorageLive expects the local to be dead, and marks it live.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val); let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
if !matches!(old, LocalValue::Dead) { if !matches!(old, LocalValue::Dead) {
@ -900,6 +994,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(()) Ok(())
} }
/// Mark a storage as live, killing the previous content.
#[inline(always)]
pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
self.storage_live_dyn(local, MemPlaceMeta::None)
}
pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> { pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local); trace!("{:?} is now dead", local);
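The `storage_live_dyn` hunk above is a hot path: it first runs a purely syntactic sized-ness check and only falls back to a layout query (and a stack allocation) for types that might be unsized. A minimal standalone sketch of that cheap-check-then-expensive-fallback shape, with a hypothetical `Ty` enum and stand-in queries rather than rustc's real types:

#[derive(Debug)]
enum Ty {
    Bool,
    U32,
    Ref,           // references are themselves always sized (thin or wide pointers)
    Slice,         // `[T]` is unsized
    Adt(String),   // user-defined type: sizedness unknown without a query
}

// Cheap, purely syntactic check: `true` means "definitely sized".
// Returning `false` only means "we must do the expensive check".
fn is_very_trivially_sized(ty: &Ty) -> bool {
    match ty {
        Ty::Bool | Ty::U32 | Ty::Ref => true,
        Ty::Slice | Ty::Adt(_) => false,
    }
}

// Stand-in for the costly path (in rustc this would be the layout query).
fn expensive_is_sized(ty: &Ty) -> bool {
    !matches!(ty, Ty::Slice)
}

fn needs_backing_allocation(ty: &Ty) -> bool {
    // Hot path: skip the expensive query whenever the cheap check suffices.
    if is_very_trivially_sized(ty) {
        false
    } else {
        !expensive_is_sized(ty)
    }
}

fn main() {
    for ty in [Ty::U32, Ty::Adt("Foo".into()), Ty::Slice] {
        println!("{ty:?}: needs allocation = {}", needs_backing_allocation(&ty));
    }
}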

View file

@ -378,7 +378,8 @@ pub fn intern_const_alloc_recursive<
ecx.tcx.sess.delay_span_bug( ecx.tcx.sess.delay_span_bug(
ecx.tcx.span, ecx.tcx.span,
format!( format!(
"error during interning should later cause validation failure: {error:?}" "error during interning should later cause validation failure: {}",
ecx.format_error(error),
), ),
); );
} }

View file

@ -33,7 +33,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are /// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`). /// `Scalar::Initialized`).
ScalarPair(Scalar<Prov>, Scalar<Prov>), ScalarPair(Scalar<Prov>, Scalar<Prov>),
/// A value of fully uninitialized memory. Can have arbitrary size and layout. /// A value of fully uninitialized memory. Can have arbitrary size and layout, but must be sized.
Uninit, Uninit,
} }
@ -190,16 +190,19 @@ impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline] #[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self { pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
ImmTy { imm: val.into(), layout } ImmTy { imm: val.into(), layout }
} }
#[inline] #[inline(always)]
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self { pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.is_sized(), "immediates must be sized");
ImmTy { imm, layout } ImmTy { imm, layout }
} }
#[inline] #[inline]
pub fn uninit(layout: TyAndLayout<'tcx>) -> Self { pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.is_sized(), "immediates must be sized");
ImmTy { imm: Immediate::Uninit, layout } ImmTy { imm: Immediate::Uninit, layout }
} }
@ -239,6 +242,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
// if the entire value is uninit, then so is the field (can happen in ConstProp) // if the entire value is uninit, then so is the field (can happen in ConstProp)
(Immediate::Uninit, _) => Immediate::Uninit, (Immediate::Uninit, _) => Immediate::Uninit,
// the field contains no information, can be left uninit // the field contains no information, can be left uninit
// (Scalar/ScalarPair can contain even an aligned ZST, not just a 1-ZST)
_ if layout.is_zst() => Immediate::Uninit, _ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data // to detect those here and also give them no data
@ -290,23 +294,21 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
self.layout self.layout
} }
fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( #[inline(always)]
&self, fn meta(&self) -> MemPlaceMeta<Prov> {
_ecx: &InterpCx<'mir, 'tcx, M>, debug_assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { MemPlaceMeta::None
assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
Ok(MemPlaceMeta::None)
} }
fn offset_with_meta( fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
Ok(self.offset_(offset, layout, cx)) Ok(self.offset_(offset, layout, ecx))
} }
fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
@ -317,49 +319,37 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
} }
} }
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
// Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
Ok(if self.layout.is_unsized() {
if matches!(self.op, Operand::Immediate(_)) {
// Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
// However, ConstProp doesn't do that, so we can run into this nonsense situation.
throw_inval!(ConstPropNonsense);
}
// There are no unsized immediates.
self.assert_mem_place().meta
} else {
MemPlaceMeta::None
})
}
}
impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> { impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
#[inline(always)] #[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> { fn layout(&self) -> TyAndLayout<'tcx> {
self.layout self.layout
} }
fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( #[inline]
&self, fn meta(&self) -> MemPlaceMeta<Prov> {
_ecx: &InterpCx<'mir, 'tcx, M>, match self.as_mplace_or_imm() {
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { Left(mplace) => mplace.meta,
self.meta() Right(_) => {
debug_assert!(self.layout.is_sized(), "unsized immediates are not a thing");
MemPlaceMeta::None
}
}
} }
fn offset_with_meta( fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() { match self.as_mplace_or_imm() {
Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()), Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()),
Right(imm) => { Right(imm) => {
assert!(!meta.has_meta()); // no place to store metadata here debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit. // Every part of an uninit is uninit.
Ok(imm.offset(offset, layout, cx)?.into()) Ok(imm.offset_(offset, layout, ecx).into())
} }
} }
} }
@ -587,6 +577,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(frame, local, layout)?; let layout = self.layout_of_local(frame, local, layout)?;
let op = *frame.locals[local].access()?; let op = *frame.locals[local].access()?;
if matches!(op, Operand::Immediate(_)) {
if layout.is_unsized() {
// ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
// efficiently check whether they are sized. We have to catch that case here.
throw_inval!(ConstPropNonsense);
}
}
Ok(OpTy { op, layout, align: Some(layout.align.abi) }) Ok(OpTy { op, layout, align: Some(layout.align.abi) })
} }
@ -600,16 +597,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match place.as_mplace_or_local() { match place.as_mplace_or_local() {
Left(mplace) => Ok(mplace.into()), Left(mplace) => Ok(mplace.into()),
Right((frame, local, offset)) => { Right((frame, local, offset)) => {
debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
let base = self.local_to_op(&self.stack()[frame], local, None)?; let base = self.local_to_op(&self.stack()[frame], local, None)?;
let mut field = if let Some(offset) = offset { let mut field = match offset {
// This got offset. We can be sure that the field is sized. Some(offset) => base.offset(offset, place.layout, self)?,
base.offset(offset, place.layout, self)? None => {
} else { // In the common case this hasn't been projected.
assert_eq!(place.layout, base.layout); debug_assert_eq!(place.layout, base.layout);
// Unsized cases are possible here since an unsized local will be a base
// `Place::Local` until the first projection calls `place_to_op` to extract the }
// underlying mplace.
base
}; };
field.align = Some(place.align); field.align = Some(place.align);
Ok(field) Ok(field)
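The reworked `meta` implementations above all follow one shape: dispatch on whether the value is backed by memory (which may carry wide-pointer metadata) or is an immediate (which never does). A rough self-contained sketch of that `Left`/`Right` dispatch, assuming the `either` crate as a dependency and toy types in place of `MPlaceTy`/`Immediate`:

use either::Either::{self, Left, Right};

// Toy stand-ins: a memory-backed place may carry metadata (e.g. a slice length),
// an immediate never does.
struct MPlace { meta: Option<u64> }
struct Imm;

enum Op {
    Indirect(MPlace),
    Immediate(Imm),
}

impl Op {
    fn as_mplace_or_imm(&self) -> Either<&MPlace, &Imm> {
        match self {
            Op::Indirect(m) => Left(m),
            Op::Immediate(i) => Right(i),
        }
    }

    // Mirrors the shape of the new `meta` methods: only the memory-backed case
    // can have metadata; the immediate case is assumed sized.
    fn meta(&self) -> Option<u64> {
        match self.as_mplace_or_imm() {
            Left(mplace) => mplace.meta,
            Right(_) => None,
        }
    }
}

fn main() {
    let slice_place = Op::Indirect(MPlace { meta: Some(3) });
    let imm = Op::Immediate(Imm);
    assert_eq!(slice_place.meta(), Some(3));
    assert_eq!(imm.meta(), None);
}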

View file

@ -13,7 +13,7 @@ use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty; use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty; use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT}; use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{ use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@ -41,33 +41,13 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> {
} }
} }
#[inline(always)]
pub fn has_meta(self) -> bool { pub fn has_meta(self) -> bool {
match self { match self {
Self::Meta(_) => true, Self::Meta(_) => true,
Self::None => false, Self::None => false,
} }
} }
pub(crate) fn len<'tcx>(
&self,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, u64> {
if layout.is_unsized() {
// We need to consult `meta` metadata
match layout.ty.kind() {
ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx),
_ => bug!("len not supported on unsized type {:?}", layout.ty),
}
} else {
// Go through the layout. There are lots of types that support a length,
// e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
match layout.fields {
abi::FieldsShape::Array { count, .. } => Ok(count),
_ => bug!("len not supported on sized type {:?}", layout.ty),
}
}
}
} }
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@ -111,6 +91,8 @@ pub enum Place<Prov: Provenance = AllocId> {
/// (Without that optimization, we'd just always be a `MemPlace`.) /// (Without that optimization, we'd just always be a `MemPlace`.)
/// Note that this only stores the frame index, not the thread this frame belongs to -- that is /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
/// implicit. This means a `Place` must never be moved across interpreter thread boundaries! /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
///
/// This variant shall not be used for unsized types -- those must always live in memory.
Local { frame: usize, local: mir::Local, offset: Option<Size> }, Local { frame: usize, local: mir::Local, offset: Option<Size> },
} }
@ -157,7 +139,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
} }
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
#[inline(always)] #[inline]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> { pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
match self.meta { match self.meta {
MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)), MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
@ -220,22 +202,20 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx
self.layout self.layout
} }
fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( #[inline(always)]
&self, fn meta(&self) -> MemPlaceMeta<Prov> {
_ecx: &InterpCx<'mir, 'tcx, M>, self.meta
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
Ok(self.meta)
} }
fn offset_with_meta( fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy { Ok(MPlaceTy {
mplace: self.mplace.offset_with_meta_(offset, meta, cx)?, mplace: self.mplace.offset_with_meta_(offset, meta, ecx)?,
align: self.align.restrict_for_offset(offset), align: self.align.restrict_for_offset(offset),
layout, layout,
}) })
@ -255,25 +235,30 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx,
self.layout self.layout
} }
fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( #[inline]
&self, fn meta(&self) -> MemPlaceMeta<Prov> {
ecx: &InterpCx<'mir, 'tcx, M>, match self.as_mplace_or_local() {
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { Left(mplace) => mplace.meta,
ecx.place_meta(self) Right(_) => {
debug_assert!(self.layout.is_sized(), "unsized locals should live in memory");
MemPlaceMeta::None
}
}
} }
fn offset_with_meta( fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
Ok(match self.as_mplace_or_local() { Ok(match self.as_mplace_or_local() {
Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(), Left(mplace) => mplace.offset_with_meta(offset, meta, layout, ecx)?.into(),
Right((frame, local, old_offset)) => { Right((frame, local, old_offset)) => {
debug_assert!(layout.is_sized(), "unsized locals should live in memory");
assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway... assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
let new_offset = cx let new_offset = ecx
.data_layout() .data_layout()
.offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?; .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
PlaceTy { PlaceTy {
@ -323,7 +308,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> { impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
/// A place is either an mplace or some local. /// A place is either an mplace or some local.
#[inline] #[inline(always)]
pub fn as_mplace_or_local( pub fn as_mplace_or_local(
&self, &self,
) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> { ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
@ -399,20 +384,6 @@ where
Prov: Provenance + 'static, Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>, M: Machine<'mir, 'tcx, Provenance = Prov>,
{ {
/// Get the metadata of the given place.
pub(super) fn place_meta(
&self,
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
if place.layout.is_unsized() {
// For `Place::Local`, the metadata is stored with the local, not the place. So we have
// to look that up first.
self.place_to_op(place)?.meta()
} else {
Ok(MemPlaceMeta::None)
}
}
/// Take a value, which represents a (thin or wide) reference, and make it a place. /// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`. /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
/// ///
@ -537,8 +508,24 @@ where
frame: usize, frame: usize,
local: mir::Local, local: mir::Local,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> { ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(&self.stack()[frame], local, None)?; // Other parts of the system rely on `Place::Local` never being unsized.
let place = Place::Local { frame, local, offset: None }; // So we eagerly check here if this local has an MPlace, and if yes we use it.
let frame_ref = &self.stack()[frame];
let layout = self.layout_of_local(frame_ref, local, None)?;
let place = if layout.is_sized() {
// We can just always use the `Local` for sized values.
Place::Local { frame, local, offset: None }
} else {
// Unsized `Local` isn't okay (we cannot store the metadata).
match frame_ref.locals[local].access()? {
Operand::Immediate(_) => {
// ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
// efficiently check whether they are sized. We have to catch that case here.
throw_inval!(ConstPropNonsense);
}
Operand::Indirect(mplace) => Place::Ptr(*mplace),
}
};
Ok(PlaceTy { place, layout, align: layout.align.abi }) Ok(PlaceTy { place, layout, align: layout.align.abi })
} }
@ -896,9 +883,7 @@ where
// that has different alignment than the outer field. // that has different alignment than the outer field.
let local_layout = let local_layout =
self.layout_of_local(&self.stack()[frame], local, None)?; self.layout_of_local(&self.stack()[frame], local, None)?;
if local_layout.is_unsized() { assert!(local_layout.is_sized(), "unsized locals cannot be immediate");
throw_unsup_format!("unsized locals are not supported");
}
let mplace = self.allocate(local_layout, MemoryKind::Stack)?; let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
// Preserve old value. (As an optimization, we can skip this if it was uninit.) // Preserve old value. (As an optimization, we can skip this if it was uninit.)
if !matches!(local_val, Immediate::Uninit) { if !matches!(local_val, Immediate::Uninit) {

View file

@ -7,12 +7,13 @@
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually //! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that. //! implement the logic on OpTy, and MPlaceTy calls that.
use std::marker::PhantomData;
use std::ops::Range;
use rustc_middle::mir; use rustc_middle::mir;
use rustc_middle::ty; use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty; use rustc_middle::ty::Ty;
use rustc_middle::ty::TyCtxt;
use rustc_target::abi::HasDataLayout;
use rustc_target::abi::Size; use rustc_target::abi::Size;
use rustc_target::abi::{self, VariantIdx}; use rustc_target::abi::{self, VariantIdx};
@ -24,44 +25,59 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
fn layout(&self) -> TyAndLayout<'tcx>; fn layout(&self) -> TyAndLayout<'tcx>;
/// Get the metadata of a wide value. /// Get the metadata of a wide value.
fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( fn meta(&self) -> MemPlaceMeta<Prov>;
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
/// Get the length of a slice/string/array stored here.
fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
ecx: &InterpCx<'mir, 'tcx, M>, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, u64> { ) -> InterpResult<'tcx, u64> {
self.meta(ecx)?.len(self.layout(), ecx) let layout = self.layout();
if layout.is_unsized() {
// We need to consult `meta` metadata
match layout.ty.kind() {
ty::Slice(..) | ty::Str => self.meta().unwrap_meta().to_target_usize(ecx),
_ => bug!("len not supported on unsized type {:?}", layout.ty),
}
} else {
// Go through the layout. There are lots of types that support a length,
// e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
match layout.fields {
abi::FieldsShape::Array { count, .. } => Ok(count),
_ => bug!("len not supported on sized type {:?}", layout.ty),
}
}
} }
/// Offset the value by the given amount, replacing the layout and metadata. /// Offset the value by the given amount, replacing the layout and metadata.
fn offset_with_meta( fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self>; ) -> InterpResult<'tcx, Self>;
fn offset( #[inline]
fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
offset: Size, offset: Size,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized()); assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx) self.offset_with_meta(offset, MemPlaceMeta::None, layout, ecx)
} }
fn transmute( #[inline]
fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self, &self,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout, ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
assert!(self.layout().is_sized() && layout.is_sized());
assert_eq!(self.layout().size, layout.size); assert_eq!(self.layout().size, layout.size);
self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx) self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, ecx)
} }
/// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
@ -72,6 +88,28 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>; ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
} }
/// A type representing iteration over the elements of an array.
pub struct ArrayIterator<'tcx, 'a, Prov: Provenance + 'static, P: Projectable<'tcx, Prov>> {
base: &'a P,
range: Range<u64>,
stride: Size,
field_layout: TyAndLayout<'tcx>,
_phantom: PhantomData<Prov>, // otherwise it says `Prov` is never used...
}
impl<'tcx, 'a, Prov: Provenance + 'static, P: Projectable<'tcx, Prov>>
ArrayIterator<'tcx, 'a, Prov, P>
{
/// Should be the same `ecx` on each call, and match the one used to create the iterator.
pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&mut self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Option<(u64, P)>> {
let Some(idx) = self.range.next() else { return Ok(None) };
Ok(Some((idx, self.base.offset(self.stride * idx, self.field_layout, ecx)?)))
}
}
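`ArrayIterator` above deliberately does not implement `Iterator`: each step needs the interpreter (`ecx`), so the caller passes it into `next` explicitly and drives the loop with `while let`. A small self-contained sketch of that pattern with a toy context instead of `InterpCx`:

use std::ops::Range;

// Toy context standing in for the interpreter (`ecx`) that real projections need.
struct Ctx { stride: u64 }

struct ArrayIter {
    base: u64,
    range: Range<u64>,
}

impl ArrayIter {
    // Not `Iterator::next`: the context is passed in per call, so the iterator
    // does not have to borrow `Ctx` for its whole lifetime.
    fn next(&mut self, ctx: &Ctx) -> Option<(u64, u64)> {
        let idx = self.range.next()?;
        Some((idx, self.base + ctx.stride * idx))
    }
}

fn main() {
    let ctx = Ctx { stride: 8 };
    let mut iter = ArrayIter { base: 0x1000, range: 0..4 };
    // Same driving-loop shape as the validity visitor uses further down.
    while let Some((idx, offset)) = iter.next(&ctx) {
        println!("element {idx} at offset {offset:#x}");
    }
}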
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where where
@ -104,7 +142,7 @@ where
// But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`) // But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`)
throw_inval!(ConstPropNonsense); throw_inval!(ConstPropNonsense);
} }
let base_meta = base.meta(self)?; let base_meta = base.meta();
// Re-use parent metadata to determine dynamic field layout. // Re-use parent metadata to determine dynamic field layout.
// With custom DSTs, this *will* execute user-defined code, but the same // With custom DSTs, this *will* execute user-defined code, but the same
// happens at run-time so that's okay. // happens at run-time so that's okay.
@ -132,7 +170,7 @@ where
base: &P, base: &P,
variant: VariantIdx, variant: VariantIdx,
) -> InterpResult<'tcx, P> { ) -> InterpResult<'tcx, P> {
assert!(!base.meta(self)?.has_meta()); assert!(!base.meta().has_meta());
// Downcasts only change the layout. // Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design, // (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.) // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
@ -206,20 +244,13 @@ where
pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>( pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
&self, &self,
base: &'a P, base: &'a P,
) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a> ) -> InterpResult<'tcx, ArrayIterator<'tcx, 'a, M::Provenance, P>> {
where
'tcx: 'a,
{
let abi::FieldsShape::Array { stride, .. } = base.layout().fields else { let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
span_bug!(self.cur_span(), "operand_array_fields: expected an array layout"); span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
}; };
let len = base.len(self)?; let len = base.len(self)?;
let field_layout = base.layout().field(self, 0); let field_layout = base.layout().field(self, 0);
let tcx: TyCtxt<'tcx> = *self.tcx; Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
// `Size` multiplication
Ok((0..len).map(move |i| {
base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
}))
} }
/// Subslicing /// Subslicing

View file

@ -2,19 +2,20 @@ use std::borrow::Cow;
use either::Either; use either::Either;
use rustc_ast::ast::InlineAsmOptions; use rustc_ast::ast::InlineAsmOptions;
use rustc_middle::mir::ProjectionElem;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::Instance; use rustc_middle::ty::Instance;
use rustc_middle::{ use rustc_middle::{
mir, mir,
ty::{self, Ty}, ty::{self, Ty},
}; };
use rustc_target::abi;
use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode}; use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
use rustc_target::abi::{self, FieldIdx};
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
use super::{ use super::{
AllocId, FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, AllocId, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable,
Operand, PlaceTy, Provenance, Scalar, StackPopCleanup, Provenance, Scalar, StackPopCleanup,
}; };
use crate::fluent_generated as fluent; use crate::fluent_generated as fluent;
@ -358,23 +359,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>), Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>, >,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>, callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_arg: &PlaceTy<'tcx, M::Provenance>, callee_arg: &mir::Place<'tcx>,
callee_ty: Ty<'tcx>,
already_live: bool,
) -> InterpResult<'tcx> ) -> InterpResult<'tcx>
where where
'tcx: 'x, 'tcx: 'x,
'tcx: 'y, 'tcx: 'y,
{ {
if matches!(callee_abi.mode, PassMode::Ignore) { if matches!(callee_abi.mode, PassMode::Ignore) {
// This one is skipped. // This one is skipped. Still must be made live though!
if !already_live {
self.storage_live(callee_arg.as_local().unwrap())?;
}
return Ok(()); return Ok(());
} }
// Find next caller arg. // Find next caller arg.
let Some((caller_arg, caller_abi)) = caller_args.next() else { let Some((caller_arg, caller_abi)) = caller_args.next() else {
throw_ub_custom!(fluent::const_eval_not_enough_caller_args); throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
}; };
// Now, check // Check compatibility
if !Self::check_argument_compat(caller_abi, callee_abi) { if !Self::check_argument_compat(caller_abi, callee_abi) {
let callee_ty = format!("{}", callee_arg.layout.ty); let callee_ty = format!("{}", callee_ty);
let caller_ty = format!("{}", caller_arg.layout().ty); let caller_ty = format!("{}", caller_arg.layout().ty);
throw_ub_custom!( throw_ub_custom!(
fluent::const_eval_incompatible_types, fluent::const_eval_incompatible_types,
@ -386,35 +392,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// will later protect the source it comes from. This means the callee cannot observe if we // will later protect the source it comes from. This means the callee cannot observe if we
// did in-place of by-copy argument passing, except for pointer equality tests. // did in-place of by-copy argument passing, except for pointer equality tests.
let caller_arg_copy = self.copy_fn_arg(&caller_arg)?; let caller_arg_copy = self.copy_fn_arg(&caller_arg)?;
// Special handling for unsized parameters. if !already_live {
if caller_arg_copy.layout.is_unsized() { let local = callee_arg.as_local().unwrap();
// `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way. let meta = caller_arg_copy.meta();
assert_eq!(caller_arg_copy.layout.ty, callee_arg.layout.ty); // `check_argument_compat` ensures that if metadata is needed, both have the same type,
// We have to properly pre-allocate the memory for the callee. // so we know they will use the metadata the same way.
// So let's tear down some abstractions. assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);
// This all has to be in memory, there are no immediate unsized values.
let src = caller_arg_copy.assert_mem_place(); self.storage_live_dyn(local, meta)?;
// The destination cannot be one of these "spread args".
let (dest_frame, dest_local, dest_offset) = callee_arg
.as_mplace_or_local()
.right()
.expect("callee fn arguments must be locals");
// We are just initializing things, so there can't be anything here yet.
assert!(matches!(
*self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
Operand::Immediate(Immediate::Uninit)
));
assert_eq!(dest_offset, None);
// Allocate enough memory to hold `src`.
let dest_place = self.allocate_dyn(src.layout, MemoryKind::Stack, src.meta)?;
// Update the local to be that new place.
*M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
} }
// Now we can finally actually evaluate the callee place.
let callee_arg = self.eval_place(*callee_arg)?;
// We allow some transmutes here. // We allow some transmutes here.
// FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
// is true for all `copy_op`, but there are a lot of special cases for argument passing // is true for all `copy_op`, but there are a lot of special cases for argument passing
// specifically.) // specifically.)
self.copy_op(&caller_arg_copy, callee_arg, /*allow_transmute*/ true)?; self.copy_op(&caller_arg_copy, &callee_arg, /*allow_transmute*/ true)?;
// If this was an in-place pass, protect the place it comes from for the duration of the call. // If this was an in-place pass, protect the place it comes from for the duration of the call.
if let FnArg::InPlace(place) = caller_arg { if let FnArg::InPlace(place) = caller_arg {
M::protect_in_place_function_argument(self, place)?; M::protect_in_place_function_argument(self, place)?;
@ -600,18 +593,47 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// not advance `caller_iter` for ZSTs. // not advance `caller_iter` for ZSTs.
let mut callee_args_abis = callee_fn_abi.args.iter(); let mut callee_args_abis = callee_fn_abi.args.iter();
for local in body.args_iter() { for local in body.args_iter() {
let dest = self.eval_place(mir::Place::from(local))?; // Construct the destination place for this argument. At this point all
// locals are still dead, so we cannot construct a `PlaceTy`.
let dest = mir::Place::from(local);
// `layout_of_local` does more than just the substitution we need to get the
// type, but the result gets cached so this avoids calling the substitution
// query *again* the next time this local is accessed.
let ty = self.layout_of_local(self.frame(), local, None)?.ty;
if Some(local) == body.spread_arg { if Some(local) == body.spread_arg {
// Make the local live once, then fill in the value field by field.
self.storage_live(local)?;
// Must be a tuple // Must be a tuple
for i in 0..dest.layout.fields.count() { let ty::Tuple(fields) = ty.kind() else {
let dest = self.project_field(&dest, i)?; span_bug!(
self.cur_span(),
"non-tuple type for `spread_arg`: {ty:?}"
)
};
for (i, field_ty) in fields.iter().enumerate() {
let dest = dest.project_deeper(
&[ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
*self.tcx,
);
let callee_abi = callee_args_abis.next().unwrap(); let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(&mut caller_args, callee_abi, &dest)?; self.pass_argument(
&mut caller_args,
callee_abi,
&dest,
field_ty,
/* already_live */ true,
)?;
} }
} else { } else {
// Normal argument // Normal argument. Cannot mark it as live yet, it might be unsized!
let callee_abi = callee_args_abis.next().unwrap(); let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(&mut caller_args, callee_abi, &dest)?; self.pass_argument(
&mut caller_args,
callee_abi,
&dest,
ty,
/* already_live */ false,
)?;
} }
} }
// If the callee needs a caller location, pretend we consume one more argument from the ABI. // If the callee needs a caller location, pretend we consume one more argument from the ABI.
@ -644,6 +666,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Nothing to do for locals, they are always properly allocated and aligned. // Nothing to do for locals, they are always properly allocated and aligned.
} }
M::protect_in_place_function_argument(self, destination)?; M::protect_in_place_function_argument(self, destination)?;
// Don't forget to mark "initially live" locals as live.
self.storage_live_for_always_live_locals()?;
}; };
match res { match res {
Err(err) => { Err(err) => {
@ -684,15 +709,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
_ => { _ => {
// Not there yet, search for the only non-ZST field. // Not there yet, search for the only non-ZST field.
// (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
let mut non_zst_field = None; let mut non_zst_field = None;
for i in 0..receiver.layout.fields.count() { for i in 0..receiver.layout.fields.count() {
let field = self.project_field(&receiver, i)?; let field = self.project_field(&receiver, i)?;
let zst = let zst = field.layout.is_1zst();
field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
if !zst { if !zst {
assert!( assert!(
non_zst_field.is_none(), non_zst_field.is_none(),
"multiple non-ZST fields in dyn receiver type {}", "multiple non-1-ZST fields in dyn receiver type {}",
receiver.layout.ty receiver.layout.ty
); );
non_zst_field = Some(field); non_zst_field = Some(field);
@ -700,7 +725,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
receiver = non_zst_field.unwrap_or_else(|| { receiver = non_zst_field.unwrap_or_else(|| {
panic!( panic!(
"no non-ZST fields in dyn receiver type {}", "no non-1-ZST fields in dyn receiver type {}",
receiver.layout.ty receiver.layout.ty
) )
}); });
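The receiver-unwrapping change above replaces the hand-written "ZST with alignment 1" test with the new `is_1zst` helper and adjusts the messages to say "1-ZST". The distinction between a ZST and a 1-ZST is observable with plain std, as in this small check:

use std::mem::{align_of, size_of};

// A 1-ZST is a type with zero size *and* alignment 1.
fn is_1zst<T>() -> bool {
    size_of::<T>() == 0 && align_of::<T>() == 1
}

fn main() {
    // `()` is a 1-ZST: zero size, alignment 1.
    assert!(is_1zst::<()>());
    // `[u64; 0]` is a ZST but not a 1-ZST: zero size, yet it keeps `u64`'s alignment (> 1).
    assert!(size_of::<[u64; 0]>() == 0);
    assert!(!is_1zst::<[u64; 0]>());
    println!("ZST vs 1-ZST distinction holds");
}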

View file

@ -911,9 +911,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Complain about any other kind of error -- those are bad because we'd like to // Complain about any other kind of error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies. // report them in a way that shows *where* in the value the issue lies.
Err(err) => { Err(err) => {
let (err, backtrace) = err.into_parts(); bug!(
backtrace.print_backtrace(); "Unexpected Undefined Behavior error during validation: {}",
bug!("Unexpected Undefined Behavior error during validation: {err:?}"); self.format_error(err)
);
} }
} }
} }

View file

@ -170,8 +170,9 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
} }
} }
FieldsShape::Array { .. } => { FieldsShape::Array { .. } => {
for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() { let mut iter = self.ecx().project_array_fields(v)?;
self.visit_field(v, idx, &field?)?; while let Some((idx, field)) = iter.next(self.ecx())? {
self.visit_field(v, idx.try_into().unwrap(), &field)?;
} }
} }
} }

View file

@ -92,7 +92,6 @@ cfg_if!(
[std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend] [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
[Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend] [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
[Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend] [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
[crate::sync::Lock<T> where T: DynSend]
[crate::sync::RwLock<T> where T: DynSend] [crate::sync::RwLock<T> where T: DynSend]
[crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool] [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
[rustc_arena::TypedArena<T> where T: DynSend] [rustc_arena::TypedArena<T> where T: DynSend]
@ -171,7 +170,6 @@ cfg_if!(
[std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync] [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
[Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync] [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
[Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync] [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
[crate::sync::Lock<T> where T: DynSend]
[crate::sync::RwLock<T> where T: DynSend + DynSync] [crate::sync::RwLock<T> where T: DynSend + DynSync]
[crate::sync::OneThread<T> where T] [crate::sync::OneThread<T> where T]
[crate::sync::WorkerLocal<T> where T: DynSend] [crate::sync::WorkerLocal<T> where T: DynSend]

View file

@ -2,9 +2,12 @@ use crate::fx::{FxHashMap, FxHasher};
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
use crate::sync::{is_dyn_thread_safe, CacheAligned}; use crate::sync::{is_dyn_thread_safe, CacheAligned};
use crate::sync::{Lock, LockGuard}; use crate::sync::{Lock, LockGuard};
#[cfg(parallel_compiler)]
use itertools::Either;
use std::borrow::Borrow; use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut; use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::iter;
use std::mem; use std::mem;
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700, // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
@ -70,19 +73,27 @@ impl<T> Sharded<T> {
} }
} }
pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> { #[inline]
pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
match self { match self {
Self::Single(single) => vec![single.lock()], #[cfg(not(parallel_compiler))]
Self::Single(single) => iter::once(single.lock()),
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
Self::Shards(shards) => shards.iter().map(|shard| shard.0.lock()).collect(), Self::Single(single) => Either::Left(iter::once(single.lock())),
#[cfg(parallel_compiler)]
Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
} }
} }
pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> { #[inline]
pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
match self { match self {
Self::Single(single) => Some(vec![single.try_lock()?]), #[cfg(not(parallel_compiler))]
Self::Single(single) => iter::once(single.try_lock()),
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
Self::Shards(shards) => shards.iter().map(|shard| shard.0.try_lock()).collect(), Self::Single(single) => Either::Left(iter::once(single.try_lock())),
#[cfg(parallel_compiler)]
Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
} }
} }
} }
@ -101,7 +112,7 @@ pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
impl<K: Eq, V> ShardedHashMap<K, V> { impl<K: Eq, V> ShardedHashMap<K, V> {
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
self.lock_shards().iter().map(|shard| shard.len()).sum() self.lock_shards().map(|shard| shard.len()).sum()
} }
} }
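`lock_shards` and `try_lock_shards` above now return `impl Iterator` instead of collecting into a `Vec`; in the parallel build the two match arms produce different concrete iterator types, which `itertools::Either` unifies. A standalone sketch of that trick, using the `either` crate directly as an assumed dependency and a toy sharded container:

use either::Either;
use std::iter;

enum Sharded<T> {
    Single(T),
    Shards(Vec<T>),
}

impl<T> Sharded<T> {
    // Both match arms yield different iterator types; `Either` (which implements
    // `Iterator` when both sides do) unifies them behind one `impl Iterator`.
    fn iter_shards(&self) -> impl Iterator<Item = &T> {
        match self {
            Sharded::Single(single) => Either::Left(iter::once(single)),
            Sharded::Shards(shards) => Either::Right(shards.iter()),
        }
    }
}

fn main() {
    let a = Sharded::Single(1);
    let b = Sharded::Shards(vec![2, 3, 4]);
    assert_eq!(a.iter_shards().count(), 1);
    assert_eq!(b.iter_shards().sum::<i32>(), 9);
}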

View file

@ -26,7 +26,8 @@
//! | `AtomicU64` | `Cell<u64>` | `atomic::AtomicU64` | //! | `AtomicU64` | `Cell<u64>` | `atomic::AtomicU64` |
//! | `AtomicUsize` | `Cell<usize>` | `atomic::AtomicUsize` | //! | `AtomicUsize` | `Cell<usize>` | `atomic::AtomicUsize` |
//! | | | | //! | | | |
//! | `Lock<T>` | `RefCell<T>` | `parking_lot::Mutex<T>` | //! | `Lock<T>` | `RefCell<T>` | `RefCell<T>` or |
//! | | | `parking_lot::Mutex<T>` |
//! | `RwLock<T>` | `RefCell<T>` | `parking_lot::RwLock<T>` | //! | `RwLock<T>` | `RefCell<T>` | `parking_lot::RwLock<T>` |
//! | `MTLock<T>` [^1] | `T` | `Lock<T>` | //! | `MTLock<T>` [^1] | `T` | `Lock<T>` |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>` | //! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>` |
@ -40,11 +41,16 @@
//! [^2] `MTLockRef` is a typedef. //! [^2] `MTLockRef` is a typedef.
pub use crate::marker::*; pub use crate::marker::*;
use parking_lot::Mutex;
use std::any::Any;
use std::collections::HashMap; use std::collections::HashMap;
use std::hash::{BuildHasher, Hash}; use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut}; use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
mod lock;
pub use lock::{Lock, LockGuard};
mod worker_local; mod worker_local;
pub use worker_local::{Registry, WorkerLocal}; pub use worker_local::{Registry, WorkerLocal};
@ -75,6 +81,13 @@ mod mode {
} }
} }
// Whether thread safety might be enabled.
#[inline]
#[cfg(parallel_compiler)]
pub fn might_be_dyn_thread_safe() -> bool {
DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
}
// Only set by the `-Z threads` compile option // Only set by the `-Z threads` compile option
pub fn set_dyn_thread_safe_mode(mode: bool) { pub fn set_dyn_thread_safe_mode(mode: bool) {
let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE }; let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
@ -92,16 +105,48 @@ mod mode {
pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
/// A guard used to hold panics that occur during a parallel section, to be unwound later.
/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
/// hiding errors by ensuring that everything in the section has completed executing before
/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
/// output matches the parallel compiler for testing purposes.
pub struct ParallelGuard {
panic: Mutex<Option<Box<dyn Any + std::marker::Send + 'static>>>,
}
impl ParallelGuard {
pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
catch_unwind(AssertUnwindSafe(f))
.map_err(|err| {
*self.panic.lock() = Some(err);
})
.ok()
}
}
/// This gives access to a fresh parallel guard in the closure and will unwind any panics
/// caught in it after the closure returns.
#[inline]
pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
let guard = ParallelGuard { panic: Mutex::new(None) };
let ret = f(&guard);
if let Some(panic) = guard.panic.into_inner() {
resume_unwind(panic);
}
ret
}
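`ParallelGuard`/`parallel_guard` above centralize the catch_unwind/resume_unwind boilerplate that the hunks below delete from `join`, `parallel!`, `par_for_each_in`, and `par_map`. A self-contained sketch of the same mechanism using only std (`std::sync::Mutex` standing in for `parking_lot::Mutex`):

use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
use std::sync::Mutex;

struct ParallelGuard {
    // A panic captured while running part of the section; re-raised at the end.
    panic: Mutex<Option<Box<dyn std::any::Any + Send + 'static>>>,
}

impl ParallelGuard {
    fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
        catch_unwind(AssertUnwindSafe(f))
            .map_err(|err| {
                *self.panic.lock().unwrap() = Some(err);
            })
            .ok()
    }
}

fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
    let guard = ParallelGuard { panic: Mutex::new(None) };
    let ret = f(&guard);
    // Everything in the section has run; now surface any captured panic.
    if let Some(panic) = guard.panic.into_inner().unwrap() {
        resume_unwind(panic);
    }
    ret
}

fn main() {
    let results = parallel_guard(|guard| {
        // Both closures run to completion even if one of them had panicked.
        (guard.run(|| 1 + 1), guard.run(|| "ok"))
    });
    assert_eq!(results, (Some(2), Some("ok")));
}

Running every block before re-raising keeps error output consistent between the parallel and non-parallel builds: one panicking block no longer hides the diagnostics the other blocks would have produced.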
cfg_if! { cfg_if! {
if #[cfg(not(parallel_compiler))] { if #[cfg(not(parallel_compiler))] {
use std::ops::Add;
use std::cell::Cell;
pub unsafe auto trait Send {} pub unsafe auto trait Send {}
pub unsafe auto trait Sync {} pub unsafe auto trait Sync {}
unsafe impl<T> Send for T {} unsafe impl<T> Send for T {}
unsafe impl<T> Sync for T {} unsafe impl<T> Sync for T {}
use std::ops::Add;
/// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc. /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
/// It has explicit ordering arguments and is only intended for use with /// It has explicit ordering arguments and is only intended for use with
/// the native atomic types. /// the native atomic types.
@ -186,67 +231,38 @@ cfg_if! {
where A: FnOnce() -> RA, where A: FnOnce() -> RA,
B: FnOnce() -> RB B: FnOnce() -> RB
{ {
(oper_a(), oper_b()) let (a, b) = parallel_guard(|guard| {
let a = guard.run(oper_a);
let b = guard.run(oper_b);
(a, b)
});
(a.unwrap(), b.unwrap())
} }
#[macro_export] #[macro_export]
macro_rules! parallel { macro_rules! parallel {
($($blocks:block),*) => { ($($blocks:block),*) => {{
// We catch panics here ensuring that all the blocks execute. $crate::sync::parallel_guard(|guard| {
// This makes behavior consistent with the parallel compiler. $(guard.run(|| $blocks);)*
let mut panic = None; });
$( }}
if let Err(p) = ::std::panic::catch_unwind(
::std::panic::AssertUnwindSafe(|| $blocks)
) {
if panic.is_none() {
panic = Some(p);
}
}
)*
if let Some(panic) = panic {
::std::panic::resume_unwind(panic);
}
}
} }
pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) { pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
// We catch panics here ensuring that all the loop iterations execute. parallel_guard(|guard| {
// This makes behavior consistent with the parallel compiler. t.into_iter().for_each(|i| {
let mut panic = None; guard.run(|| for_each(i));
t.into_iter().for_each(|i| { });
if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) { })
if panic.is_none() {
panic = Some(p);
}
}
});
if let Some(panic) = panic {
resume_unwind(panic);
}
} }
pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>( pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
t: T, t: T,
mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R, mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
) -> C { ) -> C {
// We catch panics here ensuring that all the loop iterations execute. parallel_guard(|guard| {
let mut panic = None; t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
let r = t.into_iter().filter_map(|i| { })
match catch_unwind(AssertUnwindSafe(|| map(i))) {
Ok(r) => Some(r),
Err(p) => {
if panic.is_none() {
panic = Some(p);
}
None
}
}
}).collect();
if let Some(panic) = panic {
resume_unwind(panic);
}
r
} }
pub use std::rc::Rc as Lrc; pub use std::rc::Rc as Lrc;
@ -255,15 +271,11 @@ cfg_if! {
pub use std::cell::Ref as MappedReadGuard; pub use std::cell::Ref as MappedReadGuard;
pub use std::cell::RefMut as WriteGuard; pub use std::cell::RefMut as WriteGuard;
pub use std::cell::RefMut as MappedWriteGuard; pub use std::cell::RefMut as MappedWriteGuard;
pub use std::cell::RefMut as LockGuard;
pub use std::cell::RefMut as MappedLockGuard; pub use std::cell::RefMut as MappedLockGuard;
pub use std::cell::OnceCell; pub use std::cell::OnceCell;
use std::cell::RefCell as InnerRwLock; use std::cell::RefCell as InnerRwLock;
use std::cell::RefCell as InnerLock;
use std::cell::Cell;
pub type MTLockRef<'a, T> = &'a mut MTLock<T>; pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
@ -313,7 +325,6 @@ cfg_if! {
pub use parking_lot::RwLockWriteGuard as WriteGuard; pub use parking_lot::RwLockWriteGuard as WriteGuard;
pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard; pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
pub use parking_lot::MutexGuard as LockGuard;
pub use parking_lot::MappedMutexGuard as MappedLockGuard; pub use parking_lot::MappedMutexGuard as MappedLockGuard;
pub use std::sync::OnceLock as OnceCell; pub use std::sync::OnceLock as OnceCell;
@ -355,7 +366,6 @@ cfg_if! {
} }
} }
use parking_lot::Mutex as InnerLock;
use parking_lot::RwLock as InnerRwLock; use parking_lot::RwLock as InnerRwLock;
use std::thread; use std::thread;
@ -372,7 +382,12 @@ cfg_if! {
let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()())); let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
(a.into_inner(), b.into_inner()) (a.into_inner(), b.into_inner())
} else { } else {
(oper_a(), oper_b()) let (a, b) = parallel_guard(|guard| {
let a = guard.run(oper_a);
let b = guard.run(oper_b);
(a, b)
});
(a.unwrap(), b.unwrap())
} }
} }
@ -407,28 +422,10 @@ cfg_if! {
// of a single threaded rustc. // of a single threaded rustc.
parallel!(impl $fblock [] [$($blocks),*]); parallel!(impl $fblock [] [$($blocks),*]);
} else { } else {
// We catch panics here ensuring that all the blocks execute. $crate::sync::parallel_guard(|guard| {
// This makes behavior consistent with the parallel compiler. guard.run(|| $fblock);
let mut panic = None; $(guard.run(|| $blocks);)*
if let Err(p) = ::std::panic::catch_unwind( });
::std::panic::AssertUnwindSafe(|| $fblock)
) {
if panic.is_none() {
panic = Some(p);
}
}
$(
if let Err(p) = ::std::panic::catch_unwind(
::std::panic::AssertUnwindSafe(|| $blocks)
) {
if panic.is_none() {
panic = Some(p);
}
}
)*
if let Some(panic) = panic {
::std::panic::resume_unwind(panic);
}
} }
}; };
} }
@ -439,34 +436,18 @@ cfg_if! {
t: T, t: T,
for_each: impl Fn(I) + DynSync + DynSend for_each: impl Fn(I) + DynSync + DynSend
) { ) {
if mode::is_dyn_thread_safe() { parallel_guard(|guard| {
let for_each = FromDyn::from(for_each); if mode::is_dyn_thread_safe() {
let panic: Lock<Option<_>> = Lock::new(None); let for_each = FromDyn::from(for_each);
t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) { t.into_par_iter().for_each(|i| {
let mut l = panic.lock(); guard.run(|| for_each(i));
if l.is_none() { });
*l = Some(p) } else {
} t.into_iter().for_each(|i| {
}); guard.run(|| for_each(i));
});
if let Some(panic) = panic.into_inner() {
resume_unwind(panic);
} }
} else { });
// We catch panics here ensuring that all the loop iterations execute.
// This makes behavior consistent with the parallel compiler.
let mut panic = None;
t.into_iter().for_each(|i| {
if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
if panic.is_none() {
panic = Some(p);
}
}
});
if let Some(panic) = panic {
resume_unwind(panic);
}
}
} }
pub fn par_map< pub fn par_map<
@ -478,46 +459,14 @@ cfg_if! {
t: T, t: T,
map: impl Fn(I) -> R + DynSync + DynSend map: impl Fn(I) -> R + DynSync + DynSend
) -> C { ) -> C {
if mode::is_dyn_thread_safe() { parallel_guard(|guard| {
let panic: Lock<Option<_>> = Lock::new(None); if mode::is_dyn_thread_safe() {
let map = FromDyn::from(map); let map = FromDyn::from(map);
// We catch panics here ensuring that all the loop iterations execute. t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
let r = t.into_par_iter().filter_map(|i| { } else {
match catch_unwind(AssertUnwindSafe(|| map(i))) { t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
Ok(r) => Some(r),
Err(p) => {
let mut l = panic.lock();
if l.is_none() {
*l = Some(p);
}
None
},
}
}).collect();
if let Some(panic) = panic.into_inner() {
resume_unwind(panic);
} }
r })
} else {
// We catch panics here ensuring that all the loop iterations execute.
let mut panic = None;
let r = t.into_iter().filter_map(|i| {
match catch_unwind(AssertUnwindSafe(|| map(i))) {
Ok(r) => Some(r),
Err(p) => {
if panic.is_none() {
panic = Some(p);
}
None
}
}
}).collect();
if let Some(panic) = panic {
resume_unwind(panic);
}
r
}
} }
/// This makes locks panic if they are already held. /// This makes locks panic if they are already held.
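The hunks above replace the per-callsite catch_unwind bookkeeping in `join`, `parallel!`, `par_for_each_in`, and `par_map` with a single guard helper. Below is a minimal standalone sketch of that pattern; the names (`PanicGuard`, `panic_guard`) are illustrative, not the actual rustc API, and the real helper additionally has to be shareable across rayon worker threads.

```rust
use std::any::Any;
use std::cell::RefCell;
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

/// Records the first panic observed; later panics are dropped.
struct PanicGuard {
    panic: RefCell<Option<Box<dyn Any + Send + 'static>>>,
}

impl PanicGuard {
    /// Runs `f` to completion even if an earlier closure panicked.
    fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
        match catch_unwind(AssertUnwindSafe(f)) {
            Ok(r) => Some(r),
            Err(p) => {
                let mut slot = self.panic.borrow_mut();
                if slot.is_none() {
                    *slot = Some(p);
                }
                None
            }
        }
    }
}

/// Runs `f`, then re-raises the first panic captured by the guard, if any.
fn panic_guard<R>(f: impl FnOnce(&PanicGuard) -> R) -> R {
    let guard = PanicGuard { panic: RefCell::new(None) };
    let ret = f(&guard);
    if let Some(p) = guard.panic.into_inner() {
        resume_unwind(p);
    }
    ret
}

fn main() {
    // Both blocks run; if either panicked, the panic would resume only after both finished.
    panic_guard(|g| {
        g.run(|| println!("first block"));
        g.run(|| println!("second block"));
    });
}
```
This keeps the behavior the deleted comments describe: every block or iteration executes, and the first panic is re-raised afterwards, so the serial and parallel compilers behave the same.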
@ -542,81 +491,6 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
} }
} }
#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);
impl<T> Lock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
Lock(InnerLock::new(inner))
}
#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
#[cfg(parallel_compiler)]
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
self.0.try_lock()
}
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
self.0.try_borrow_mut().ok()
}
#[cfg(parallel_compiler)]
#[inline(always)]
#[track_caller]
pub fn lock(&self) -> LockGuard<'_, T> {
if ERROR_CHECKING {
self.0.try_lock().expect("lock was already held")
} else {
self.0.lock()
}
}
#[cfg(not(parallel_compiler))]
#[inline(always)]
#[track_caller]
pub fn lock(&self) -> LockGuard<'_, T> {
self.0.borrow_mut()
}
#[inline(always)]
#[track_caller]
pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
f(&mut *self.lock())
}
#[inline(always)]
#[track_caller]
pub fn borrow(&self) -> LockGuard<'_, T> {
self.lock()
}
#[inline(always)]
#[track_caller]
pub fn borrow_mut(&self) -> LockGuard<'_, T> {
self.lock()
}
}
impl<T: Default> Default for Lock<T> {
#[inline]
fn default() -> Self {
Lock::new(T::default())
}
}
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>); pub struct RwLock<T>(InnerRwLock<T>);

View file

@ -0,0 +1,276 @@
//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
//!
//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
#[cfg(not(parallel_compiler))]
use std::cell::RefCell;
#[cfg(parallel_compiler)]
use {
crate::cold_path,
crate::sync::DynSend,
crate::sync::DynSync,
parking_lot::lock_api::RawMutex,
std::cell::Cell,
std::cell::UnsafeCell,
std::fmt,
std::intrinsics::{likely, unlikely},
std::marker::PhantomData,
std::mem::ManuallyDrop,
std::ops::{Deref, DerefMut},
};
#[cfg(not(parallel_compiler))]
pub use std::cell::RefMut as LockGuard;
#[cfg(not(parallel_compiler))]
#[derive(Debug)]
pub struct Lock<T>(RefCell<T>);
#[cfg(not(parallel_compiler))]
impl<T> Lock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
Lock(RefCell::new(inner))
}
#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
self.0.try_borrow_mut().ok()
}
#[inline(always)]
#[track_caller]
pub fn lock(&self) -> LockGuard<'_, T> {
self.0.borrow_mut()
}
}
/// A guard holding mutable access to a `Lock` which is in a locked state.
#[cfg(parallel_compiler)]
#[must_use = "if unused the Lock will immediately unlock"]
pub struct LockGuard<'a, T> {
lock: &'a Lock<T>,
marker: PhantomData<&'a mut T>,
}
#[cfg(parallel_compiler)]
impl<'a, T: 'a> Deref for LockGuard<'a, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
// SAFETY: We have shared access to the mutable access owned by this type,
// so we can give out a shared reference.
unsafe { &*self.lock.data.get() }
}
}
#[cfg(parallel_compiler)]
impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
// SAFETY: We have mutable access to the data so we can give out a mutable reference.
unsafe { &mut *self.lock.data.get() }
}
}
#[cfg(parallel_compiler)]
impl<'a, T: 'a> Drop for LockGuard<'a, T> {
#[inline]
fn drop(&mut self) {
// SAFETY: We know that the lock is in a locked
// state because it is an invariant of this type.
unsafe { self.lock.raw.unlock() };
}
}
#[cfg(parallel_compiler)]
union LockRawUnion {
/// Indicates if the cell is locked. Only used if `LockRaw.sync` is false.
cell: ManuallyDrop<Cell<bool>>,
/// A lock implementation that's only used if `LockRaw.sync` is true.
lock: ManuallyDrop<parking_lot::RawMutex>,
}
/// A raw lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
/// It contains no associated data and is used in the implementation of `Lock` which does have such data.
///
/// A manual implementation of a tagged union is used with the `sync` field and the `LockRawUnion` instead
/// of using enums as it results in better code generation.
#[cfg(parallel_compiler)]
struct LockRaw {
/// If true, synchronization is used via `opt.lock`; otherwise a non-thread-safe cell is used
/// via `opt.cell`. This is set on initialization and never changed.
sync: bool,
opt: LockRawUnion,
}
#[cfg(parallel_compiler)]
impl LockRaw {
fn new() -> Self {
if unlikely(super::mode::might_be_dyn_thread_safe()) {
// Create the lock with synchronization enabled using the `RawMutex` type.
LockRaw {
sync: true,
opt: LockRawUnion { lock: ManuallyDrop::new(parking_lot::RawMutex::INIT) },
}
} else {
// Create the lock with synchronization disabled.
LockRaw { sync: false, opt: LockRawUnion { cell: ManuallyDrop::new(Cell::new(false)) } }
}
}
#[inline(always)]
fn try_lock(&self) -> bool {
// SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
unsafe {
if likely(!self.sync) {
if self.opt.cell.get() {
false
} else {
self.opt.cell.set(true);
true
}
} else {
self.opt.lock.try_lock()
}
}
}
#[inline(always)]
fn lock(&self) {
if super::ERROR_CHECKING {
// We're in debug mode, so assert that the lock is not held so we
// get a panic instead of waiting for the lock.
assert_eq!(self.try_lock(), true, "lock must not be held");
} else {
// SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
unsafe {
if likely(!self.sync) {
if unlikely(self.opt.cell.replace(true)) {
cold_path(|| panic!("lock was already held"))
}
} else {
self.opt.lock.lock();
}
}
}
}
/// This unlocks the lock.
///
/// # Safety
/// This method may only be called if the lock is currently held.
#[inline(always)]
unsafe fn unlock(&self) {
// SAFETY: The union use is safe since the union fields are used in accordance with
// `self.sync` and the `unlock` method precondition is upheld by the caller.
unsafe {
if likely(!self.sync) {
debug_assert_eq!(self.opt.cell.get(), true);
self.opt.cell.set(false);
} else {
self.opt.lock.unlock();
}
}
}
}
/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
#[cfg(parallel_compiler)]
pub struct Lock<T> {
raw: LockRaw,
data: UnsafeCell<T>,
}
#[cfg(parallel_compiler)]
impl<T> Lock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
Lock { raw: LockRaw::new(), data: UnsafeCell::new(inner) }
}
#[inline(always)]
pub fn into_inner(self) -> T {
self.data.into_inner()
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.data.get_mut()
}
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
if self.raw.try_lock() { Some(LockGuard { lock: self, marker: PhantomData }) } else { None }
}
#[inline(always)]
pub fn lock(&self) -> LockGuard<'_, T> {
self.raw.lock();
LockGuard { lock: self, marker: PhantomData }
}
}
impl<T> Lock<T> {
#[inline(always)]
#[track_caller]
pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
f(&mut *self.lock())
}
#[inline(always)]
#[track_caller]
pub fn borrow(&self) -> LockGuard<'_, T> {
self.lock()
}
#[inline(always)]
#[track_caller]
pub fn borrow_mut(&self) -> LockGuard<'_, T> {
self.lock()
}
}
#[cfg(parallel_compiler)]
unsafe impl<T: DynSend> DynSend for Lock<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T: DynSend> DynSync for Lock<T> {}
#[cfg(parallel_compiler)]
impl<T: fmt::Debug> fmt::Debug for Lock<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.try_lock() {
Some(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("Lock").field("data", &LockedPlaceholder).finish()
}
}
}
}
impl<T: Default> Default for Lock<T> {
#[inline]
fn default() -> Self {
Lock::new(T::default())
}
}
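The `Debug` impl above avoids blocking on a held lock by printing a placeholder instead. The same trick works against `std::sync::Mutex`; the sketch below is a standalone illustration, not the rustc `Lock` type (which additionally picks between a raw mutex and a plain cell at construction time).

```rust
use std::fmt;
use std::sync::Mutex;

struct DebugLock<T>(Mutex<T>);

impl<T: fmt::Debug> fmt::Debug for DebugLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.0.try_lock() {
            // Lock is free: show the data.
            Ok(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
            // Lock is held (or poisoned): show a placeholder instead of blocking.
            Err(_) => f.debug_struct("Lock").field("data", &"<locked>").finish(),
        }
    }
}

fn main() {
    let lock = DebugLock(Mutex::new(vec![1, 2, 3]));
    println!("{lock:?}"); // Lock { data: [1, 2, 3] }
    let _held = lock.0.lock().unwrap();
    println!("{lock:?}"); // Lock { data: "<locked>" }
}
```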

View file

@ -1,4 +1,4 @@
use crate::sync::Lock; use parking_lot::Mutex;
use std::cell::Cell; use std::cell::Cell;
use std::cell::OnceCell; use std::cell::OnceCell;
use std::ops::Deref; use std::ops::Deref;
@ -35,7 +35,7 @@ impl RegistryId {
struct RegistryData { struct RegistryData {
thread_limit: usize, thread_limit: usize,
threads: Lock<usize>, threads: Mutex<usize>,
} }
/// Represents a list of threads which can access worker locals. /// Represents a list of threads which can access worker locals.
@ -65,7 +65,7 @@ thread_local! {
impl Registry { impl Registry {
/// Creates a registry which can hold up to `thread_limit` threads. /// Creates a registry which can hold up to `thread_limit` threads.
pub fn new(thread_limit: usize) -> Self { pub fn new(thread_limit: usize) -> Self {
Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) })) Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) }))
} }
/// Gets the registry associated with the current thread. Panics if there's no such registry. /// Gets the registry associated with the current thread. Panics if there's no such registry.

View file

@ -85,6 +85,15 @@ pub mod pretty;
#[macro_use] #[macro_use]
mod print; mod print;
mod session_diagnostics; mod session_diagnostics;
#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))]
mod signal_handler;
#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
mod signal_handler {
/// On platforms which don't support our signal handler's requirements,
/// simply use the default signal handler provided by std.
pub(super) fn install() {}
}
use crate::session_diagnostics::{ use crate::session_diagnostics::{
RLinkEmptyVersionNumber, RLinkEncodingVersionMismatch, RLinkRustcVersionMismatch, RLinkEmptyVersionNumber, RLinkEncodingVersionMismatch, RLinkRustcVersionMismatch,
@ -852,11 +861,9 @@ fn print_crate_info(
use rustc_target::spec::current_apple_deployment_target; use rustc_target::spec::current_apple_deployment_target;
if sess.target.is_like_osx { if sess.target.is_like_osx {
println_info!( let (major, minor) = current_apple_deployment_target(&sess.target)
"deployment_target={}", .expect("unknown Apple target OS");
current_apple_deployment_target(&sess.target) println_info!("deployment_target={}", format!("{major}.{minor}"))
.expect("unknown Apple target OS")
)
} else { } else {
handler handler
.early_error("only Apple targets currently support deployment version info") .early_error("only Apple targets currently support deployment version info")
@ -1442,72 +1449,6 @@ pub fn init_env_logger(handler: &EarlyErrorHandler, env: &str) {
} }
} }
#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))]
mod signal_handler {
extern "C" {
fn backtrace_symbols_fd(
buffer: *const *mut libc::c_void,
size: libc::c_int,
fd: libc::c_int,
);
}
extern "C" fn print_stack_trace(_: libc::c_int) {
const MAX_FRAMES: usize = 256;
static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] =
[std::ptr::null_mut(); MAX_FRAMES];
unsafe {
let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32);
if depth == 0 {
return;
}
backtrace_symbols_fd(STACK_TRACE.as_ptr(), depth, 2);
}
}
/// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
/// process, print a stack trace and then exit.
pub(super) fn install() {
use std::alloc::{alloc, Layout};
unsafe {
let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
let mut alt_stack: libc::stack_t = std::mem::zeroed();
alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
alt_stack.ss_size = alt_stack_size;
libc::sigaltstack(&alt_stack, std::ptr::null_mut());
let mut sa: libc::sigaction = std::mem::zeroed();
sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
libc::sigemptyset(&mut sa.sa_mask);
libc::sigaction(libc::SIGSEGV, &sa, std::ptr::null_mut());
}
}
/// Modern kernels on modern hardware can have dynamic signal stack sizes.
#[cfg(any(target_os = "linux", target_os = "android"))]
fn min_sigstack_size() -> usize {
const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
// If getauxval couldn't find the entry, it returns 0,
// so take the higher of the "constant" and auxval.
// This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
}
/// Not all OS support hardware where this is needed.
#[cfg(not(any(target_os = "linux", target_os = "android")))]
fn min_sigstack_size() -> usize {
libc::MINSIGSTKSZ
}
}
#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
mod signal_handler {
pub(super) fn install() {}
}
pub fn main() -> ! { pub fn main() -> ! {
let start_time = Instant::now(); let start_time = Instant::now();
let start_rss = get_resident_set_size(); let start_rss = get_resident_set_size();

View file

@ -0,0 +1,142 @@
//! Signal handler for rustc
//! Primarily used to extract a backtrace from stack overflow
use std::alloc::{alloc, Layout};
use std::{fmt, mem, ptr};
extern "C" {
fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int);
}
fn backtrace_stderr(buffer: &[*mut libc::c_void]) {
let size = buffer.len().try_into().unwrap_or_default();
unsafe { backtrace_symbols_fd(buffer.as_ptr(), size, libc::STDERR_FILENO) };
}
/// Unbuffered, unsynchronized writer to stderr.
///
/// Only acceptable because everything will end soon anyway.
struct RawStderr(());
impl fmt::Write for RawStderr {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
let ret = unsafe { libc::write(libc::STDERR_FILENO, s.as_ptr().cast(), s.len()) };
if ret == -1 { Err(fmt::Error) } else { Ok(()) }
}
}
/// We don't really care how many bytes we actually get out. SIGSEGV comes for our head.
/// Splash stderr with letters of our own blood to warn our friends about the monster.
macro raw_errln($tokens:tt) {
let _ = ::core::fmt::Write::write_fmt(&mut RawStderr(()), format_args!($tokens));
let _ = ::core::fmt::Write::write_char(&mut RawStderr(()), '\n');
}
/// Signal handler installed for SIGSEGV
extern "C" fn print_stack_trace(_: libc::c_int) {
const MAX_FRAMES: usize = 256;
// Reserve data segment so we don't have to malloc in a signal handler, which might fail
// in incredibly undesirable and unexpected ways due to e.g. the allocator deadlocking
static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] = [ptr::null_mut(); MAX_FRAMES];
let stack = unsafe {
// Collect return addresses
let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32);
if depth == 0 {
return;
}
&STACK_TRACE.as_slice()[0..(depth as _)]
};
// Just a stack trace is cryptic. Explain what we're doing.
raw_errln!("error: rustc interrupted by SIGSEGV, printing backtrace\n");
let mut written = 1;
let mut consumed = 0;
// Begin elaborating return addrs into symbols and writing them directly to stderr
// Most backtraces are from stack overflow, and most stack overflows are from recursion
// Check for cycles before writing 250 lines of the same ~5 symbols
let cycled = |(runner, walker)| runner == walker;
let mut cyclic = false;
if let Some(period) = stack.iter().skip(1).step_by(2).zip(stack).position(cycled) {
let period = period.saturating_add(1); // avoid "what if wrapped?" branches
let Some(offset) = stack.iter().skip(period).zip(stack).position(cycled) else {
// impossible.
return;
};
// Count matching trace slices, else we could miscount "biphasic cycles"
// with the same period + loop entry but a different inner loop
let next_cycle = stack[offset..].chunks_exact(period).skip(1);
let cycles = 1 + next_cycle
.zip(stack[offset..].chunks_exact(period))
.filter(|(next, prev)| next == prev)
.count();
backtrace_stderr(&stack[..offset]);
written += offset;
consumed += offset;
if cycles > 1 {
raw_errln!("\n### cycle encountered after {offset} frames with period {period}");
backtrace_stderr(&stack[consumed..consumed + period]);
raw_errln!("### recursed {cycles} times\n");
written += period + 4;
consumed += period * cycles;
cyclic = true;
};
}
let rem = &stack[consumed..];
backtrace_stderr(rem);
raw_errln!("");
written += rem.len() + 1;
let random_depth = || 8 * 16; // chosen by random diceroll (2d20)
if cyclic || stack.len() > random_depth() {
// technically speculation, but assert it with confidence anyway.
// rustc only arrived in this signal handler because bad things happened
// and this message is for explaining it's not the programmer's fault
raw_errln!("note: rustc unexpectedly overflowed its stack! this is a bug");
written += 1;
}
if stack.len() == MAX_FRAMES {
raw_errln!("note: maximum backtrace depth reached, frames may have been lost");
written += 1;
}
raw_errln!("note: we would appreciate a report at https://github.com/rust-lang/rust");
written += 1;
if written > 24 {
// We probably just scrolled the earlier "we got SIGSEGV" message off the terminal
raw_errln!("note: backtrace dumped due to SIGSEGV! resuming signal");
};
}
/// When SIGSEGV is delivered to the process, print a stack trace and then exit.
pub(super) fn install() {
unsafe {
let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
let mut alt_stack: libc::stack_t = mem::zeroed();
alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
alt_stack.ss_size = alt_stack_size;
libc::sigaltstack(&alt_stack, ptr::null_mut());
let mut sa: libc::sigaction = mem::zeroed();
sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
libc::sigemptyset(&mut sa.sa_mask);
libc::sigaction(libc::SIGSEGV, &sa, ptr::null_mut());
}
}
/// Modern kernels on modern hardware can have dynamic signal stack sizes.
#[cfg(any(target_os = "linux", target_os = "android"))]
fn min_sigstack_size() -> usize {
const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
// If getauxval couldn't find the entry, it returns 0,
// so take the higher of the "constant" and auxval.
// This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
}
/// Not all OSes support hardware where this is needed.
#[cfg(not(any(target_os = "linux", target_os = "android")))]
fn min_sigstack_size() -> usize {
libc::MINSIGSTKSZ
}
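The cycle check in `print_stack_trace` pairs each frame with the frame at twice the index; the first match gives a distance that is a multiple of the period of the repeating tail, which the handler then uses as a chunk size to count recursions. A standalone sketch over plain integers (illustrative only, not the handler itself):

```rust
/// Returns the distance between the two cursors at their first match,
/// which is a multiple of the period of a repeating tail (if any).
fn find_period<T: PartialEq>(stack: &[T]) -> Option<usize> {
    let cycled = |(runner, walker): (&T, &T)| runner == walker;
    stack
        .iter()
        .skip(1)
        .step_by(2) // "runner" visits indices 1, 3, 5, ...
        .zip(stack) // "walker" visits indices 0, 1, 2, ...
        .position(cycled)
        .map(|p| p.saturating_add(1))
}

fn main() {
    // A trace that alternates between two frames from the start.
    let frames = [5, 6, 5, 6, 5, 6, 5, 6];
    assert_eq!(find_period(&frames), Some(2));

    // With a few entry frames first, the detected value is a multiple of the
    // true period; the real handler then locates the offset and counts cycles.
    let frames = [0, 1, 2, 7, 8, 7, 8, 7, 8, 7, 8];
    assert_eq!(find_period(&frames), Some(4));
    println!("ok");
}
```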

View file

@ -24,7 +24,7 @@ use rustc_lint_defs::pluralize;
use derive_setters::Setters; use derive_setters::Setters;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sync::Lrc; use rustc_data_structures::sync::{DynSend, IntoDynSyncSend, Lrc};
use rustc_error_messages::{FluentArgs, SpanLabel}; use rustc_error_messages::{FluentArgs, SpanLabel};
use rustc_span::hygiene::{ExpnKind, MacroKind}; use rustc_span::hygiene::{ExpnKind, MacroKind};
use std::borrow::Cow; use std::borrow::Cow;
@ -188,6 +188,8 @@ impl Margin {
const ANONYMIZED_LINE_NUM: &str = "LL"; const ANONYMIZED_LINE_NUM: &str = "LL";
pub type DynEmitter = dyn Emitter + DynSend;
/// Emitter trait for emitting errors. /// Emitter trait for emitting errors.
pub trait Emitter: Translate { pub trait Emitter: Translate {
/// Emit a structured diagnostic. /// Emit a structured diagnostic.
@ -625,7 +627,7 @@ impl ColorConfig {
#[derive(Setters)] #[derive(Setters)]
pub struct EmitterWriter { pub struct EmitterWriter {
#[setters(skip)] #[setters(skip)]
dst: Destination, dst: IntoDynSyncSend<Destination>,
sm: Option<Lrc<SourceMap>>, sm: Option<Lrc<SourceMap>>,
fluent_bundle: Option<Lrc<FluentBundle>>, fluent_bundle: Option<Lrc<FluentBundle>>,
#[setters(skip)] #[setters(skip)]
@ -655,7 +657,7 @@ impl EmitterWriter {
fn create(dst: Destination, fallback_bundle: LazyFallbackBundle) -> EmitterWriter { fn create(dst: Destination, fallback_bundle: LazyFallbackBundle) -> EmitterWriter {
EmitterWriter { EmitterWriter {
dst, dst: IntoDynSyncSend(dst),
sm: None, sm: None,
fluent_bundle: None, fluent_bundle: None,
fallback_bundle, fallback_bundle,

View file

@ -22,7 +22,7 @@ use crate::{
}; };
use rustc_lint_defs::Applicability; use rustc_lint_defs::Applicability;
use rustc_data_structures::sync::Lrc; use rustc_data_structures::sync::{IntoDynSyncSend, Lrc};
use rustc_error_messages::FluentArgs; use rustc_error_messages::FluentArgs;
use rustc_span::hygiene::ExpnData; use rustc_span::hygiene::ExpnData;
use rustc_span::Span; use rustc_span::Span;
@ -38,7 +38,7 @@ use serde::Serialize;
mod tests; mod tests;
pub struct JsonEmitter { pub struct JsonEmitter {
dst: Box<dyn Write + Send>, dst: IntoDynSyncSend<Box<dyn Write + Send>>,
registry: Option<Registry>, registry: Option<Registry>,
sm: Lrc<SourceMap>, sm: Lrc<SourceMap>,
fluent_bundle: Option<Lrc<FluentBundle>>, fluent_bundle: Option<Lrc<FluentBundle>>,
@ -66,7 +66,7 @@ impl JsonEmitter {
terminal_url: TerminalUrl, terminal_url: TerminalUrl,
) -> JsonEmitter { ) -> JsonEmitter {
JsonEmitter { JsonEmitter {
dst: Box::new(io::BufWriter::new(io::stderr())), dst: IntoDynSyncSend(Box::new(io::BufWriter::new(io::stderr()))),
registry, registry,
sm: source_map, sm: source_map,
fluent_bundle, fluent_bundle,
@ -120,7 +120,7 @@ impl JsonEmitter {
terminal_url: TerminalUrl, terminal_url: TerminalUrl,
) -> JsonEmitter { ) -> JsonEmitter {
JsonEmitter { JsonEmitter {
dst, dst: IntoDynSyncSend(dst),
registry, registry,
sm: source_map, sm: source_map,
fluent_bundle, fluent_bundle,

View file

@ -30,11 +30,11 @@ pub use emitter::ColorConfig;
use rustc_lint_defs::LintExpectationId; use rustc_lint_defs::LintExpectationId;
use Level::*; use Level::*;
use emitter::{is_case_difference, Emitter, EmitterWriter}; use emitter::{is_case_difference, DynEmitter, Emitter, EmitterWriter};
use registry::Registry; use registry::Registry;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::stable_hasher::{Hash128, StableHasher}; use rustc_data_structures::stable_hasher::{Hash128, StableHasher};
use rustc_data_structures::sync::{self, IntoDynSyncSend, Lock, Lrc}; use rustc_data_structures::sync::{Lock, Lrc};
use rustc_data_structures::AtomicRef; use rustc_data_structures::AtomicRef;
pub use rustc_error_messages::{ pub use rustc_error_messages::{
fallback_fluent_bundle, fluent_bundle, DelayDm, DiagnosticMessage, FluentBundle, fallback_fluent_bundle, fluent_bundle, DelayDm, DiagnosticMessage, FluentBundle,
@ -55,7 +55,7 @@ use std::num::NonZeroUsize;
use std::panic; use std::panic;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use termcolor::{Color, ColorSpec}; pub use termcolor::{Color, ColorSpec, WriteColor};
pub mod annotate_snippet_emitter_writer; pub mod annotate_snippet_emitter_writer;
mod diagnostic; mod diagnostic;
@ -428,7 +428,7 @@ struct HandlerInner {
err_count: usize, err_count: usize,
warn_count: usize, warn_count: usize,
deduplicated_err_count: usize, deduplicated_err_count: usize,
emitter: IntoDynSyncSend<Box<dyn Emitter + sync::Send>>, emitter: Box<DynEmitter>,
delayed_span_bugs: Vec<DelayedDiagnostic>, delayed_span_bugs: Vec<DelayedDiagnostic>,
delayed_good_path_bugs: Vec<DelayedDiagnostic>, delayed_good_path_bugs: Vec<DelayedDiagnostic>,
/// This flag indicates that an expected diagnostic was emitted and suppressed. /// This flag indicates that an expected diagnostic was emitted and suppressed.
@ -594,7 +594,7 @@ impl Handler {
self self
} }
pub fn with_emitter(emitter: Box<dyn Emitter + sync::Send>) -> Self { pub fn with_emitter(emitter: Box<DynEmitter>) -> Self {
Self { Self {
inner: Lock::new(HandlerInner { inner: Lock::new(HandlerInner {
flags: HandlerFlags { can_emit_warnings: true, ..Default::default() }, flags: HandlerFlags { can_emit_warnings: true, ..Default::default() },
@ -603,7 +603,7 @@ impl Handler {
warn_count: 0, warn_count: 0,
deduplicated_err_count: 0, deduplicated_err_count: 0,
deduplicated_warn_count: 0, deduplicated_warn_count: 0,
emitter: IntoDynSyncSend(emitter), emitter,
delayed_span_bugs: Vec::new(), delayed_span_bugs: Vec::new(),
delayed_good_path_bugs: Vec::new(), delayed_good_path_bugs: Vec::new(),
suppressed_expected_diag: false, suppressed_expected_diag: false,
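The `DynEmitter` alias and the `IntoDynSyncSend` wrappers in the three hunks above exist because, under the dyn-thread-safe scheme, emitters and their output destinations must satisfy the opt-in `DynSend`/`DynSync` markers rather than plain `Send`/`Sync`. The sketch below mocks those markers to show what the wrapper buys; the real definitions live in `rustc_data_structures::sync` and differ in detail.

```rust
use std::io::Write;

// Mock markers standing in for the real opt-in traits.
unsafe trait DynSend {}
unsafe trait DynSync {}

// Wrapper that promotes ordinary `Send`/`Sync` types to the dyn-safe markers.
struct IntoDynSyncSend<T: ?Sized>(T);
unsafe impl<T: ?Sized + Send> DynSend for IntoDynSyncSend<T> {}
unsafe impl<T: ?Sized + Sync> DynSync for IntoDynSyncSend<T> {}

fn require_dyn_send<T: DynSend>(_: &T) {}

fn main() {
    // `Box<dyn Write + Send>` is `Send`, so wrapping it yields a `DynSend` value,
    // which is the kind of bound a field like `JsonEmitter::dst` needs.
    let dst: IntoDynSyncSend<Box<dyn Write + Send>> = IntoDynSyncSend(Box::new(std::io::sink()));
    require_dyn_send(&dst);
    println!("wrapped emitter destination is DynSend");
}
```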

View file

@ -823,7 +823,7 @@ fn check_impl_items_against_trait<'tcx>(
}; };
match ty_impl_item.kind { match ty_impl_item.kind {
ty::AssocKind::Const => { ty::AssocKind::Const => {
let _ = tcx.compare_impl_const(( tcx.ensure().compare_impl_const((
impl_item.expect_local(), impl_item.expect_local(),
ty_impl_item.trait_item_def_id.unwrap(), ty_impl_item.trait_item_def_id.unwrap(),
)); ));

View file

@ -195,8 +195,8 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
let ty_b = field.ty(tcx, args_b); let ty_b = field.ty(tcx, args_b);
if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) { if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
if layout.is_zst() && layout.align.abi.bytes() == 1 { if layout.is_1zst() {
// ignore ZST fields with alignment of 1 byte // ignore 1-ZST fields
return false; return false;
} }
} }

View file

@ -129,7 +129,15 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc
// By default, RPIT are invariant wrt type and const generics, but they are bivariant wrt // By default, RPIT are invariant wrt type and const generics, but they are bivariant wrt
// lifetime generics. // lifetime generics.
let mut variances: Vec<_> = std::iter::repeat(ty::Invariant).take(generics.count()).collect(); let variances = std::iter::repeat(ty::Invariant).take(generics.count());
let mut variances: Vec<_> = match tcx.opaque_type_origin(item_def_id) {
rustc_hir::OpaqueTyOrigin::FnReturn(_) | rustc_hir::OpaqueTyOrigin::AsyncFn(_) => {
variances.collect()
}
// But TAIT are invariant for all generics
rustc_hir::OpaqueTyOrigin::TyAlias { .. } => return tcx.arena.alloc_from_iter(variances),
};
// Mark all lifetimes from parent generics as unused (Bivariant). // Mark all lifetimes from parent generics as unused (Bivariant).
// This will be overridden later if required. // This will be overridden later if required.

View file

@ -2310,13 +2310,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let guar = if field.name == kw::Empty { let guar = if field.name == kw::Empty {
self.tcx.sess.delay_span_bug(field.span, "field name with no name") self.tcx.sess.delay_span_bug(field.span, "field name with no name")
} else if self.method_exists( } else if self.method_exists(field, base_ty, expr.hir_id, expected.only_has_type(self)) {
field,
base_ty,
expr.hir_id,
true,
expected.only_has_type(self),
) {
self.ban_take_value_of_method(expr, base_ty, field) self.ban_take_value_of_method(expr, base_ty, field)
} else if !base_ty.is_primitive_ty() { } else if !base_ty.is_primitive_ty() {
self.ban_nonexisting_field(field, base, expr, base_ty) self.ban_nonexisting_field(field, base, expr, base_ty)
@ -2501,7 +2495,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut err = self.private_field_err(field, base_did); let mut err = self.private_field_err(field, base_did);
// Also check if an accessible method exists, which is often what is meant. // Also check if an accessible method exists, which is often what is meant.
if self.method_exists(field, expr_t, expr.hir_id, false, return_ty) if self.method_exists(field, expr_t, expr.hir_id, return_ty)
&& !self.expr_in_place(expr.hir_id) && !self.expr_in_place(expr.hir_id)
{ {
self.suggest_method_call( self.suggest_method_call(

View file

@ -89,14 +89,13 @@ pub enum CandidateSource {
} }
impl<'a, 'tcx> FnCtxt<'a, 'tcx> { impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Determines whether the type `self_ty` supports a method name `method_name` or not. /// Determines whether the type `self_ty` supports a visible method named `method_name` or not.
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub fn method_exists( pub fn method_exists(
&self, &self,
method_name: Ident, method_name: Ident,
self_ty: Ty<'tcx>, self_ty: Ty<'tcx>,
call_expr_id: hir::HirId, call_expr_id: hir::HirId,
allow_private: bool,
return_type: Option<Ty<'tcx>>, return_type: Option<Ty<'tcx>>,
) -> bool { ) -> bool {
match self.probe_for_name( match self.probe_for_name(
@ -118,7 +117,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} }
Err(NoMatch(..)) => false, Err(NoMatch(..)) => false,
Err(Ambiguity(..)) => true, Err(Ambiguity(..)) => true,
Err(PrivateMatch(..)) => allow_private, Err(PrivateMatch(..)) => false,
Err(IllegalSizedBound { .. }) => true, Err(IllegalSizedBound { .. }) => true,
Err(BadReturnType) => false, Err(BadReturnType) => false,
} }

View file

@ -2361,8 +2361,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Some(output_ty) => self.resolve_vars_if_possible(output_ty), Some(output_ty) => self.resolve_vars_if_possible(output_ty),
_ => return, _ => return,
}; };
let method_exists = let method_exists = self.method_exists(item_name, output_ty, call.hir_id, return_type);
self.method_exists(item_name, output_ty, call.hir_id, true, return_type);
debug!("suggest_await_before_method: is_method_exist={}", method_exists); debug!("suggest_await_before_method: is_method_exist={}", method_exists);
if method_exists { if method_exists {
err.span_suggestion_verbose( err.span_suggestion_verbose(

View file

@ -743,12 +743,11 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
rustc_passes::hir_id_validator::check_crate(tcx); rustc_passes::hir_id_validator::check_crate(tcx);
let sess = tcx.sess; let sess = tcx.sess;
let mut entry_point = None;
sess.time("misc_checking_1", || { sess.time("misc_checking_1", || {
parallel!( parallel!(
{ {
entry_point = sess.time("looking_for_entry_point", || tcx.entry_fn(())); sess.time("looking_for_entry_point", || tcx.ensure().entry_fn(()));
sess.time("looking_for_derive_registrar", || { sess.time("looking_for_derive_registrar", || {
tcx.ensure().proc_macro_decls_static(()) tcx.ensure().proc_macro_decls_static(())
@ -863,7 +862,7 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
// This check has to be run after all lints are done processing. We don't // This check has to be run after all lints are done processing. We don't
// define a lint filter, as all lint checks should have finished at this point. // define a lint filter, as all lint checks should have finished at this point.
sess.time("check_lint_expectations", || tcx.check_expectations(None)); sess.time("check_lint_expectations", || tcx.ensure().check_expectations(None));
}); });
if sess.opts.unstable_opts.print_vtable_sizes { if sess.opts.unstable_opts.print_vtable_sizes {

View file

@ -137,10 +137,8 @@ fn get_stack_size() -> Option<usize> {
env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE) env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE)
} }
#[cfg(not(parallel_compiler))] pub(crate) fn run_in_thread_with_globals<F: FnOnce() -> R + Send, R: Send>(
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition, edition: Edition,
_threads: usize,
f: F, f: F,
) -> R { ) -> R {
// The "thread pool" is a single spawned thread in the non-parallel // The "thread pool" is a single spawned thread in the non-parallel
@ -171,18 +169,37 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
}) })
} }
#[cfg(not(parallel_compiler))]
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
_threads: usize,
f: F,
) -> R {
run_in_thread_with_globals(edition, f)
}
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition, edition: Edition,
threads: usize, threads: usize,
f: F, f: F,
) -> R { ) -> R {
use rustc_data_structures::jobserver; use rustc_data_structures::{jobserver, sync::FromDyn};
use rustc_middle::ty::tls; use rustc_middle::ty::tls;
use rustc_query_impl::QueryCtxt; use rustc_query_impl::QueryCtxt;
use rustc_query_system::query::{deadlock, QueryContext}; use rustc_query_system::query::{deadlock, QueryContext};
let registry = sync::Registry::new(threads); let registry = sync::Registry::new(threads);
if !sync::is_dyn_thread_safe() {
return run_in_thread_with_globals(edition, || {
// Register the thread for use with the `WorkerLocal` type.
registry.register();
f()
});
}
let mut builder = rayon::ThreadPoolBuilder::new() let mut builder = rayon::ThreadPoolBuilder::new()
.thread_name(|_| "rustc".to_string()) .thread_name(|_| "rustc".to_string())
.acquire_thread_handler(jobserver::acquire_thread) .acquire_thread_handler(jobserver::acquire_thread)
@ -191,13 +208,13 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
.deadlock_handler(|| { .deadlock_handler(|| {
// On deadlock, creates a new thread and forwards information in thread // On deadlock, creates a new thread and forwards information in thread
// locals to it. The new thread runs the deadlock handler. // locals to it. The new thread runs the deadlock handler.
let query_map = tls::with(|tcx| { let query_map = FromDyn::from(tls::with(|tcx| {
QueryCtxt::new(tcx) QueryCtxt::new(tcx)
.try_collect_active_jobs() .try_collect_active_jobs()
.expect("active jobs shouldn't be locked in deadlock handler") .expect("active jobs shouldn't be locked in deadlock handler")
}); }));
let registry = rayon_core::Registry::current(); let registry = rayon_core::Registry::current();
thread::spawn(move || deadlock(query_map, &registry)); thread::spawn(move || deadlock(query_map.into_inner(), &registry));
}); });
if let Some(size) = get_stack_size() { if let Some(size) = get_stack_size() {
builder = builder.stack_size(size); builder = builder.stack_size(size);
@ -209,6 +226,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
// `Send` in the parallel compiler. // `Send` in the parallel compiler.
rustc_span::create_session_globals_then(edition, || { rustc_span::create_session_globals_then(edition, || {
rustc_span::with_session_globals(|session_globals| { rustc_span::with_session_globals(|session_globals| {
let session_globals = FromDyn::from(session_globals);
builder builder
.build_scoped( .build_scoped(
// Initialize each new worker thread when created. // Initialize each new worker thread when created.
@ -216,7 +234,9 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
// Register the thread for use with the `WorkerLocal` type. // Register the thread for use with the `WorkerLocal` type.
registry.register(); registry.register();
rustc_span::set_session_globals_then(session_globals, || thread.run()) rustc_span::set_session_globals_then(session_globals.into_inner(), || {
thread.run()
})
}, },
// Run `f` on the first thread in the thread pool. // Run `f` on the first thread in the thread pool.
move |pool: &rayon::ThreadPool| pool.install(f), move |pool: &rayon::ThreadPool| pool.install(f),

View file

@ -804,7 +804,7 @@ pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed) tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
} }
/// `repr(transparent)` structs can have a single non-ZST field, this function returns that /// `repr(transparent)` structs can have a single non-1-ZST field, this function returns that
/// field. /// field.
pub fn transparent_newtype_field<'a, 'tcx>( pub fn transparent_newtype_field<'a, 'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
@ -813,8 +813,8 @@ pub fn transparent_newtype_field<'a, 'tcx>(
let param_env = tcx.param_env(variant.def_id); let param_env = tcx.param_env(variant.def_id);
variant.fields.iter().find(|field| { variant.fields.iter().find(|field| {
let field_ty = tcx.type_of(field.did).instantiate_identity(); let field_ty = tcx.type_of(field.did).instantiate_identity();
let is_zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_zst()); let is_1zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_1zst());
!is_zst !is_1zst
}) })
} }

View file

@ -250,6 +250,7 @@ impl<'a, 'tcx> Metadata<'a, 'tcx> for (CrateMetadataRef<'a>, TyCtxt<'tcx>) {
} }
impl<T: ParameterizedOverTcx> LazyValue<T> { impl<T: ParameterizedOverTcx> LazyValue<T> {
#[inline]
fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(self, metadata: M) -> T::Value<'tcx> fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(self, metadata: M) -> T::Value<'tcx>
where where
T::Value<'tcx>: Decodable<DecodeContext<'a, 'tcx>>, T::Value<'tcx>: Decodable<DecodeContext<'a, 'tcx>>,
@ -294,6 +295,7 @@ unsafe impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> TrustedLen
} }
impl<T: ParameterizedOverTcx> LazyArray<T> { impl<T: ParameterizedOverTcx> LazyArray<T> {
#[inline]
fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>( fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(
self, self,
metadata: M, metadata: M,
@ -360,8 +362,8 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
self.read_lazy_offset_then(|pos| LazyArray::from_position_and_num_elems(pos, len)) self.read_lazy_offset_then(|pos| LazyArray::from_position_and_num_elems(pos, len))
} }
fn read_lazy_table<I, T>(&mut self, len: usize) -> LazyTable<I, T> { fn read_lazy_table<I, T>(&mut self, width: usize, len: usize) -> LazyTable<I, T> {
self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, len)) self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, width, len))
} }
#[inline] #[inline]
@ -420,6 +422,7 @@ impl<'a, 'tcx> TyDecoder for DecodeContext<'a, 'tcx> {
} }
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum { impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum {
#[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> CrateNum { fn decode(d: &mut DecodeContext<'a, 'tcx>) -> CrateNum {
let cnum = CrateNum::from_u32(d.read_u32()); let cnum = CrateNum::from_u32(d.read_u32());
d.map_encoded_cnum_to_current(cnum) d.map_encoded_cnum_to_current(cnum)
@ -427,18 +430,21 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum {
} }
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex { impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex {
#[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefIndex { fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefIndex {
DefIndex::from_u32(d.read_u32()) DefIndex::from_u32(d.read_u32())
} }
} }
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex { impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex {
#[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ExpnIndex { fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ExpnIndex {
ExpnIndex::from_u32(d.read_u32()) ExpnIndex::from_u32(d.read_u32())
} }
} }
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ast::AttrId { impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ast::AttrId {
#[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ast::AttrId { fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ast::AttrId {
let sess = d.sess.expect("can't decode AttrId without Session"); let sess = d.sess.expect("can't decode AttrId without Session");
sess.parse_sess.attr_id_generator.mk_attr_id() sess.parse_sess.attr_id_generator.mk_attr_id()
@ -672,6 +678,7 @@ impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyValue<T> {
} }
impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> { impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> {
#[inline]
fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
let len = decoder.read_usize(); let len = decoder.read_usize();
if len == 0 { LazyArray::default() } else { decoder.read_lazy_array(len) } if len == 0 { LazyArray::default() } else { decoder.read_lazy_array(len) }
@ -680,8 +687,9 @@ impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> {
impl<'a, 'tcx, I: Idx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyTable<I, T> { impl<'a, 'tcx, I: Idx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyTable<I, T> {
fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
let width = decoder.read_usize();
let len = decoder.read_usize(); let len = decoder.read_usize();
decoder.read_lazy_table(len) decoder.read_lazy_table(width, len)
} }
} }

View file

@ -131,7 +131,8 @@ impl<'a, 'tcx, T> Encodable<EncodeContext<'a, 'tcx>> for LazyArray<T> {
impl<'a, 'tcx, I, T> Encodable<EncodeContext<'a, 'tcx>> for LazyTable<I, T> { impl<'a, 'tcx, I, T> Encodable<EncodeContext<'a, 'tcx>> for LazyTable<I, T> {
fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) { fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
e.emit_usize(self.encoded_size); e.emit_usize(self.width);
e.emit_usize(self.len);
e.emit_lazy_distance(self.position); e.emit_lazy_distance(self.position);
} }
} }

View file

@ -142,7 +142,11 @@ impl<T> LazyArray<T> {
/// eagerly and in-order. /// eagerly and in-order.
struct LazyTable<I, T> { struct LazyTable<I, T> {
position: NonZeroUsize, position: NonZeroUsize,
encoded_size: usize, /// The encoded size of the elements of a table is selected at runtime to drop
/// trailing zeroes. This is the number of bytes used for each table element.
width: usize,
/// How many elements are in the table.
len: usize,
_marker: PhantomData<fn(I) -> T>, _marker: PhantomData<fn(I) -> T>,
} }
@ -153,9 +157,10 @@ impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for LazyTable<I,
impl<I, T> LazyTable<I, T> { impl<I, T> LazyTable<I, T> {
fn from_position_and_encoded_size( fn from_position_and_encoded_size(
position: NonZeroUsize, position: NonZeroUsize,
encoded_size: usize, width: usize,
len: usize,
) -> LazyTable<I, T> { ) -> LazyTable<I, T> {
LazyTable { position, encoded_size, _marker: PhantomData } LazyTable { position, width, len, _marker: PhantomData }
} }
} }

View file

@ -38,6 +38,12 @@ impl IsDefault for u32 {
} }
} }
impl IsDefault for u64 {
fn is_default(&self) -> bool {
*self == 0
}
}
impl<T> IsDefault for LazyArray<T> { impl<T> IsDefault for LazyArray<T> {
fn is_default(&self) -> bool { fn is_default(&self) -> bool {
self.num_elems == 0 self.num_elems == 0
@ -89,6 +95,20 @@ impl FixedSizeEncoding for u32 {
} }
} }
impl FixedSizeEncoding for u64 {
type ByteArray = [u8; 8];
#[inline]
fn from_bytes(b: &[u8; 8]) -> Self {
Self::from_le_bytes(*b)
}
#[inline]
fn write_to_bytes(self, b: &mut [u8; 8]) {
*b = self.to_le_bytes();
}
}
macro_rules! fixed_size_enum { macro_rules! fixed_size_enum {
($ty:ty { $(($($pat:tt)*))* }) => { ($ty:ty { $(($($pat:tt)*))* }) => {
impl FixedSizeEncoding for Option<$ty> { impl FixedSizeEncoding for Option<$ty> {
@ -300,21 +320,21 @@ impl FixedSizeEncoding for UnusedGenericParams {
// generic `LazyValue<T>` impl, but in the general case we might not need / want // generic `LazyValue<T>` impl, but in the general case we might not need / want
// to fit every `usize` in `u32`. // to fit every `usize` in `u32`.
impl<T> FixedSizeEncoding for Option<LazyValue<T>> { impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
type ByteArray = [u8; 4]; type ByteArray = [u8; 8];
#[inline] #[inline]
fn from_bytes(b: &[u8; 4]) -> Self { fn from_bytes(b: &[u8; 8]) -> Self {
let position = NonZeroUsize::new(u32::from_bytes(b) as usize)?; let position = NonZeroUsize::new(u64::from_bytes(b) as usize)?;
Some(LazyValue::from_position(position)) Some(LazyValue::from_position(position))
} }
#[inline] #[inline]
fn write_to_bytes(self, b: &mut [u8; 4]) { fn write_to_bytes(self, b: &mut [u8; 8]) {
match self { match self {
None => unreachable!(), None => unreachable!(),
Some(lazy) => { Some(lazy) => {
let position = lazy.position.get(); let position = lazy.position.get();
let position: u32 = position.try_into().unwrap(); let position: u64 = position.try_into().unwrap();
position.write_to_bytes(b) position.write_to_bytes(b)
} }
} }
@ -323,55 +343,75 @@ impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
impl<T> LazyArray<T> { impl<T> LazyArray<T> {
#[inline] #[inline]
fn write_to_bytes_impl(self, b: &mut [u8; 8]) { fn write_to_bytes_impl(self, b: &mut [u8; 16]) {
let ([position_bytes, meta_bytes], []) = b.as_chunks_mut::<4>() else { panic!() }; let position = (self.position.get() as u64).to_le_bytes();
let len = (self.num_elems as u64).to_le_bytes();
let position = self.position.get(); // Element width is selected at runtime on a per-table basis by omitting trailing
let position: u32 = position.try_into().unwrap(); // zero bytes in table elements. This works very naturally when table elements are
position.write_to_bytes(position_bytes); // simple numbers but `LazyArray` is a pair of integers. If naively encoded, the second
// element would shield the trailing zeroes in the first. Interleaving the bytes
let len = self.num_elems; // of the position and length exposes trailing zeroes in both to the optimization.
let len: u32 = len.try_into().unwrap(); // We encode length second because we generally expect it to be smaller.
len.write_to_bytes(meta_bytes); for i in 0..8 {
b[2 * i] = position[i];
b[2 * i + 1] = len[i];
}
} }
fn from_bytes_impl(position_bytes: &[u8; 4], meta_bytes: &[u8; 4]) -> Option<LazyArray<T>> { fn from_bytes_impl(position: &[u8; 8], meta: &[u8; 8]) -> Option<LazyArray<T>> {
let position = NonZeroUsize::new(u32::from_bytes(position_bytes) as usize)?; let position = NonZeroUsize::new(u64::from_bytes(&position) as usize)?;
let len = u32::from_bytes(meta_bytes) as usize; let len = u64::from_bytes(&meta) as usize;
Some(LazyArray::from_position_and_num_elems(position, len)) Some(LazyArray::from_position_and_num_elems(position, len))
} }
} }
// Decoding helper for the encoding scheme used by `LazyArray`.
// Interleaving the bytes of the two integers exposes trailing zero bytes in both of them
// to the variable-width scheme that we use for tables.
#[inline]
fn decode_interleaved(encoded: &[u8; 16]) -> ([u8; 8], [u8; 8]) {
let mut first = [0u8; 8];
let mut second = [0u8; 8];
for i in 0..8 {
first[i] = encoded[2 * i];
second[i] = encoded[2 * i + 1];
}
(first, second)
}
impl<T> FixedSizeEncoding for LazyArray<T> { impl<T> FixedSizeEncoding for LazyArray<T> {
type ByteArray = [u8; 8]; type ByteArray = [u8; 16];
#[inline] #[inline]
fn from_bytes(b: &[u8; 8]) -> Self { fn from_bytes(b: &[u8; 16]) -> Self {
let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() }; let (position, meta) = decode_interleaved(b);
if *meta_bytes == [0; 4] {
if meta == [0; 8] {
return Default::default(); return Default::default();
} }
LazyArray::from_bytes_impl(position_bytes, meta_bytes).unwrap() LazyArray::from_bytes_impl(&position, &meta).unwrap()
} }
#[inline] #[inline]
fn write_to_bytes(self, b: &mut [u8; 8]) { fn write_to_bytes(self, b: &mut [u8; 16]) {
assert!(!self.is_default()); assert!(!self.is_default());
self.write_to_bytes_impl(b) self.write_to_bytes_impl(b)
} }
} }
impl<T> FixedSizeEncoding for Option<LazyArray<T>> { impl<T> FixedSizeEncoding for Option<LazyArray<T>> {
type ByteArray = [u8; 8]; type ByteArray = [u8; 16];
#[inline] #[inline]
fn from_bytes(b: &[u8; 8]) -> Self { fn from_bytes(b: &[u8; 16]) -> Self {
let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() }; let (position, meta) = decode_interleaved(b);
LazyArray::from_bytes_impl(position_bytes, meta_bytes)
LazyArray::from_bytes_impl(&position, &meta)
} }
#[inline] #[inline]
fn write_to_bytes(self, b: &mut [u8; 8]) { fn write_to_bytes(self, b: &mut [u8; 16]) {
match self { match self {
None => unreachable!(), None => unreachable!(),
Some(lazy) => lazy.write_to_bytes_impl(b), Some(lazy) => lazy.write_to_bytes_impl(b),
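A standalone sketch of the interleaved layout introduced above (not the rustc implementation itself): with position and length bytes alternating, a small position and a small length leave all the trailing bytes of the 16-byte entry zero, which the per-table width selection can then drop.

```rust
fn encode_interleaved(position: u64, len: u64) -> [u8; 16] {
    let position = position.to_le_bytes();
    let len = len.to_le_bytes();
    let mut out = [0u8; 16];
    for i in 0..8 {
        out[2 * i] = position[i];
        out[2 * i + 1] = len[i];
    }
    out
}

fn decode_interleaved(encoded: &[u8; 16]) -> (u64, u64) {
    let mut position = [0u8; 8];
    let mut len = [0u8; 8];
    for i in 0..8 {
        position[i] = encoded[2 * i];
        len[i] = encoded[2 * i + 1];
    }
    (u64::from_le_bytes(position), u64::from_le_bytes(len))
}

fn main() {
    // position = 0x0304, len = 2: only the first three bytes are non-zero,
    // so a table of such entries could be stored with width 3 instead of 16.
    let bytes = encode_interleaved(0x0304, 2);
    assert_eq!(&bytes[..3], &[0x04, 0x02, 0x03]);
    assert!(bytes[3..].iter().all(|&b| b == 0));
    assert_eq!(decode_interleaved(&bytes), (0x0304, 2));
    println!("entry bytes: {bytes:?}");
}
```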
@ -381,13 +421,14 @@ impl<T> FixedSizeEncoding for Option<LazyArray<T>> {
/// Helper for constructing a table's serialization (also see `Table`). /// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T: FixedSizeEncoding> { pub(super) struct TableBuilder<I: Idx, T: FixedSizeEncoding> {
width: usize,
blocks: IndexVec<I, T::ByteArray>, blocks: IndexVec<I, T::ByteArray>,
_marker: PhantomData<T>, _marker: PhantomData<T>,
} }
impl<I: Idx, T: FixedSizeEncoding> Default for TableBuilder<I, T> { impl<I: Idx, T: FixedSizeEncoding> Default for TableBuilder<I, T> {
fn default() -> Self { fn default() -> Self {
TableBuilder { blocks: Default::default(), _marker: PhantomData } TableBuilder { width: 0, blocks: Default::default(), _marker: PhantomData }
} }
} }
@ -415,40 +456,63 @@ impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]>> TableBui
// > store bit-masks of which item in each bucket is actually serialized). // > store bit-masks of which item in each bucket is actually serialized).
let block = self.blocks.ensure_contains_elem(i, || [0; N]); let block = self.blocks.ensure_contains_elem(i, || [0; N]);
value.write_to_bytes(block); value.write_to_bytes(block);
if self.width != N {
let width = N - trailing_zeros(block);
self.width = self.width.max(width);
}
} }
} }
pub(crate) fn encode(&self, buf: &mut FileEncoder) -> LazyTable<I, T> { pub(crate) fn encode(&self, buf: &mut FileEncoder) -> LazyTable<I, T> {
let pos = buf.position(); let pos = buf.position();
let width = self.width;
for block in &self.blocks { for block in &self.blocks {
buf.emit_raw_bytes(block); buf.emit_raw_bytes(&block[..width]);
} }
let num_bytes = self.blocks.len() * N;
LazyTable::from_position_and_encoded_size( LazyTable::from_position_and_encoded_size(
NonZeroUsize::new(pos as usize).unwrap(), NonZeroUsize::new(pos as usize).unwrap(),
num_bytes, width,
self.blocks.len(),
) )
} }
} }
fn trailing_zeros(x: &[u8]) -> usize {
x.iter().rev().take_while(|b| **b == 0).count()
}
impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]> + ParameterizedOverTcx> impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]> + ParameterizedOverTcx>
LazyTable<I, T> LazyTable<I, T>
where where
for<'tcx> T::Value<'tcx>: FixedSizeEncoding<ByteArray = [u8; N]>, for<'tcx> T::Value<'tcx>: FixedSizeEncoding<ByteArray = [u8; N]>,
{ {
/// Given the metadata, extract out the value at a particular index (if any). /// Given the metadata, extract out the value at a particular index (if any).
#[inline(never)]
pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> T::Value<'tcx> { pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> T::Value<'tcx> {
trace!("LazyTable::lookup: index={:?} len={:?}", i, self.encoded_size); trace!("LazyTable::lookup: index={:?} len={:?}", i, self.len);
let start = self.position.get(); // Access past the end of the table returns a Default
let bytes = &metadata.blob()[start..start + self.encoded_size]; if i.index() >= self.len {
let (bytes, []) = bytes.as_chunks::<N>() else { panic!() }; return Default::default();
bytes.get(i.index()).map_or_else(Default::default, FixedSizeEncoding::from_bytes) }
let width = self.width;
let start = self.position.get() + (width * i.index());
let end = start + width;
let bytes = &metadata.blob()[start..end];
if let Ok(fixed) = bytes.try_into() {
FixedSizeEncoding::from_bytes(fixed)
} else {
let mut fixed = [0u8; N];
fixed[..width].copy_from_slice(bytes);
FixedSizeEncoding::from_bytes(&fixed)
}
} }
/// Size of the table in entries, including possible gaps. /// Size of the table in entries, including possible gaps.
pub(super) fn size(&self) -> usize { pub(super) fn size(&self) -> usize {
self.encoded_size / N self.len
} }
} }
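Putting the pieces of this file together, here is a minimal sketch of the per-table width optimization (illustrative helpers, not the rustc `TableBuilder`/`LazyTable` API): the widest entry after dropping trailing zero bytes fixes the width, only that many bytes are written per entry, and reads zero-extend back to the full fixed-size array.

```rust
fn trailing_zeros(x: &[u8]) -> usize {
    x.iter().rev().take_while(|b| **b == 0).count()
}

/// Encodes fixed-size entries using the smallest width that preserves them all.
fn encode_table<const N: usize>(entries: &[[u8; N]]) -> (usize, Vec<u8>) {
    let width = entries.iter().map(|e| N - trailing_zeros(e)).max().unwrap_or(0);
    let mut buf = Vec::new();
    for e in entries {
        buf.extend_from_slice(&e[..width]);
    }
    (width, buf)
}

/// Reads entry `i` back, zero-extending from `width` bytes to the full `N`.
fn read_entry<const N: usize>(buf: &[u8], width: usize, i: usize) -> [u8; N] {
    let mut fixed = [0u8; N];
    fixed[..width].copy_from_slice(&buf[i * width..(i + 1) * width]);
    fixed
}

fn main() {
    // Two 8-byte entries whose values fit in 2 bytes: the table stores 4 bytes
    // instead of 16, and lookups pad back up to 8 bytes before decoding.
    let entries = [3u64.to_le_bytes(), 300u64.to_le_bytes()];
    let (width, buf) = encode_table(&entries);
    assert_eq!((width, buf.len()), (2, 4));
    assert_eq!(u64::from_le_bytes(read_entry::<8>(&buf, width, 1)), 300);
    println!("width = {width}, bytes = {buf:?}");
}
```
This is also why the `Option<LazyValue<T>>` encoding above could grow from 4 to 8 bytes for free: the extra bytes are zero for small positions and are dropped by the width selection.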

View file

@ -701,6 +701,8 @@ impl<'hir> Map<'hir> {
// expressions. // expressions.
ignore_tail = true; ignore_tail = true;
} }
let mut prev_hir_id = None;
while let Some((hir_id, node)) = iter.next() { while let Some((hir_id, node)) = iter.next() {
if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) { if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) {
match next_node { match next_node {
@ -715,7 +717,14 @@ impl<'hir> Map<'hir> {
| Node::ForeignItem(_) | Node::ForeignItem(_)
| Node::TraitItem(_) | Node::TraitItem(_)
| Node::Expr(Expr { kind: ExprKind::Closure { .. }, .. }) | Node::Expr(Expr { kind: ExprKind::Closure { .. }, .. })
| Node::ImplItem(_) => return Some(hir_id), | Node::ImplItem(_)
// The input node `id` must be enclosed in the method's body as opposed
// to some other place such as its return type (fixes #114918).
// We verify that indirectly by checking that the previous node is the
// current node's body
if node.body_id().map(|b| b.hir_id) == prev_hir_id => {
return Some(hir_id)
}
// Ignore `return`s on the first iteration // Ignore `return`s on the first iteration
Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. }) Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. })
| Node::Local(_) => { | Node::Local(_) => {
@ -723,6 +732,8 @@ impl<'hir> Map<'hir> {
} }
_ => {} _ => {}
} }
prev_hir_id = Some(hir_id);
} }
None None
} }

View file

@ -415,6 +415,8 @@ pub enum UnsupportedOpInfo {
/// Free-form case. Only for errors that are never caught! /// Free-form case. Only for errors that are never caught!
// FIXME still use translatable diagnostics // FIXME still use translatable diagnostics
Unsupported(String), Unsupported(String),
/// Unsized local variables.
UnsizedLocal,
// //
// The variants below are only reachable from CTFE/const prop, miri will never emit them. // The variants below are only reachable from CTFE/const prop, miri will never emit them.
// //

View file

@ -1296,25 +1296,26 @@ macro_rules! sty_debug_print {
}; };
$(let mut $variant = total;)* $(let mut $variant = total;)*
let shards = tcx.interners.type_.lock_shards(); for shard in tcx.interners.type_.lock_shards() {
let types = shards.iter().flat_map(|shard| shard.keys()); let types = shard.keys();
for &InternedInSet(t) in types { for &InternedInSet(t) in types {
let variant = match t.internee { let variant = match t.internee {
ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
ty::Float(..) | ty::Str | ty::Never => continue, ty::Float(..) | ty::Str | ty::Never => continue,
ty::Error(_) => /* unimportant */ continue, ty::Error(_) => /* unimportant */ continue,
$(ty::$variant(..) => &mut $variant,)* $(ty::$variant(..) => &mut $variant,)*
}; };
let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER); let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER); let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER); let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
variant.total += 1; variant.total += 1;
total.total += 1; total.total += 1;
if lt { total.lt_infer += 1; variant.lt_infer += 1 } if lt { total.lt_infer += 1; variant.lt_infer += 1 }
if ty { total.ty_infer += 1; variant.ty_infer += 1 } if ty { total.ty_infer += 1; variant.ty_infer += 1 }
if ct { total.ct_infer += 1; variant.ct_infer += 1 } if ct { total.ct_infer += 1; variant.ct_infer += 1 }
if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 } if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
}
} }
writeln!(fmt, "Ty interner total ty lt ct all")?; writeln!(fmt, "Ty interner total ty lt ct all")?;
$(writeln!(fmt, " {:18}: {uses:6} {usespc:4.1}%, \ $(writeln!(fmt, " {:18}: {uses:6} {usespc:4.1}%, \

View file

@ -4,7 +4,9 @@ use crate::query::TyCtxtAt;
use crate::ty::normalize_erasing_regions::NormalizationError; use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::{self, ConstKind, ReprOptions, Ty, TyCtxt, TypeVisitableExt}; use crate::ty::{self, ConstKind, ReprOptions, Ty, TyCtxt, TypeVisitableExt};
use rustc_error_messages::DiagnosticMessage; use rustc_error_messages::DiagnosticMessage;
use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic}; use rustc_errors::{
DiagnosticArgValue, DiagnosticBuilder, Handler, IntoDiagnostic, IntoDiagnosticArg,
};
use rustc_hir as hir; use rustc_hir as hir;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_index::IndexVec; use rustc_index::IndexVec;
@ -265,6 +267,12 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
} }
} }
impl<'tcx> IntoDiagnosticArg for LayoutError<'tcx> {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
self.to_string().into_diagnostic_arg()
}
}
#[derive(Clone, Copy)] #[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> { pub struct LayoutCx<'tcx, C> {
pub tcx: C, pub tcx: C,

View file

@ -238,7 +238,7 @@ pub struct ImplHeader<'tcx> {
pub impl_def_id: DefId, pub impl_def_id: DefId,
pub self_ty: Ty<'tcx>, pub self_ty: Ty<'tcx>,
pub trait_ref: Option<TraitRef<'tcx>>, pub trait_ref: Option<TraitRef<'tcx>>,
pub predicates: Vec<(Predicate<'tcx>, Span)>, pub predicates: Vec<Predicate<'tcx>>,
} }
#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable)] #[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable)]

View file

@ -376,6 +376,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
) )
.expect("failed to push initial stack frame"); .expect("failed to push initial stack frame");
for local in body.local_decls.indices() {
// Mark everything initially live.
// This is somewhat dicey since some of them might be unsized and it is incoherent to
// mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter
// stopping us before those unsized immediates can cause issues deeper in the
// interpreter.
ecx.frame_mut().locals[local].value =
LocalValue::Live(interpret::Operand::Immediate(Immediate::Uninit));
}
ConstPropagator { ecx, tcx, param_env, local_decls: &dummy_body.local_decls } ConstPropagator { ecx, tcx, param_env, local_decls: &dummy_body.local_decls }
} }

View file

@ -206,6 +206,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
) )
.expect("failed to push initial stack frame"); .expect("failed to push initial stack frame");
for local in body.local_decls.indices() {
// Mark everything initially live.
// This is somewhat dicey since some of them might be unsized and it is incoherent to
// mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter
// stopping us before those unsized immediates can cause issues deeper in the
// interpreter.
ecx.frame_mut().locals[local].value =
LocalValue::Live(interpret::Operand::Immediate(Immediate::Uninit));
}
ConstPropagator { ConstPropagator {
ecx, ecx,
tcx, tcx,
@ -273,7 +283,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// dedicated error variants should be introduced instead. // dedicated error variants should be introduced instead.
assert!( assert!(
!error.kind().formatted_string(), !error.kind().formatted_string(),
"const-prop encountered formatting error: {error:?}", "const-prop encountered formatting error: {}",
self.ecx.format_error(error),
); );
None None
} }

View file

@ -388,14 +388,11 @@ impl<'tcx> Inliner<'tcx> {
return Err("never inline hint"); return Err("never inline hint");
} }
// Only inline local functions if they would be eligible for cross-crate // Reachability pass defines which functions are eligible for inlining. Generally inlining
// inlining. This is to ensure that the final crate doesn't have MIR that // other functions is incorrect because they could reference symbols that aren't exported.
// reference unexported symbols let is_generic = callsite.callee.args.non_erasable_generics().next().is_some();
if callsite.callee.def_id().is_local() { if !is_generic && !callee_attrs.requests_inline() {
let is_generic = callsite.callee.args.non_erasable_generics().next().is_some(); return Err("not exported");
if !is_generic && !callee_attrs.requests_inline() {
return Err("not exported");
}
} }
if callsite.fn_sig.c_variadic() { if callsite.fn_sig.c_variadic() {

View file

@ -1851,21 +1851,11 @@ impl<'a> Parser<'a> {
attrs: AttrVec, attrs: AttrVec,
) -> PResult<'a, FieldDef> { ) -> PResult<'a, FieldDef> {
let name = self.parse_field_ident(adt_ty, lo)?; let name = self.parse_field_ident(adt_ty, lo)?;
// Parse the macro invocation and recover
if self.token.kind == token::Not { if self.token.kind == token::Not {
if let Err(mut err) = self.unexpected::<FieldDef>() { if let Err(mut err) = self.unexpected::<FieldDef>() {
err.subdiagnostic(MacroExpandsToAdtField { adt_ty }).emit(); // Encountered a macro invocation
self.bump(); err.subdiagnostic(MacroExpandsToAdtField { adt_ty });
self.parse_delim_args()?; return Err(err);
return Ok(FieldDef {
span: DUMMY_SP,
ident: None,
vis,
id: DUMMY_NODE_ID,
ty: self.mk_ty(DUMMY_SP, TyKind::Err),
attrs,
is_placeholder: false,
});
} }
} }
self.expect_field_ty_separator()?; self.expect_field_ty_separator()?;

View file

@ -891,18 +891,32 @@ impl<'a> Parser<'a> {
// that we do not use the try operator when parsing the type because // that we do not use the try operator when parsing the type because
// if it fails then we get a parser error which we don't want (we're trying // if it fails then we get a parser error which we don't want (we're trying
// to recover from errors, not make more). // to recover from errors, not make more).
let path = if self.may_recover() let path = if self.may_recover() {
&& matches!(ty.kind, TyKind::Ptr(..) | TyKind::Ref(..)) let (span, message, sugg, path, applicability) = match &ty.kind {
&& let TyKind::Path(_, path) = &ty.peel_refs().kind { TyKind::Ptr(..) | TyKind::Ref(..) if let TyKind::Path(_, path) = &ty.peel_refs().kind => {
// Just get the indirection part of the type. (
let span = ty.span.until(path.span); ty.span.until(path.span),
"consider removing the indirection",
"",
path,
Applicability::MaybeIncorrect
)
}
TyKind::ImplTrait(_, bounds)
if let [GenericBound::Trait(tr, ..), ..] = bounds.as_slice() =>
{
(
ty.span.until(tr.span),
"use the trait bounds directly",
"",
&tr.trait_ref.path,
Applicability::MachineApplicable
)
}
_ => return Err(err)
};
err.span_suggestion_verbose( err.span_suggestion_verbose(span, message, sugg, applicability);
span,
"consider removing the indirection",
"",
Applicability::MaybeIncorrect,
);
path.clone() path.clone()
} else { } else {

View file

@ -70,8 +70,7 @@ where
} }
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
let shards = self.cache.lock_shards(); for shard in self.cache.lock_shards() {
for shard in shards.iter() {
for (k, v) in shard.iter() { for (k, v) in shard.iter() {
f(k, &v.0, v.1); f(k, &v.0, v.1);
} }
@ -160,8 +159,7 @@ where
} }
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
let shards = self.cache.lock_shards(); for shard in self.cache.lock_shards() {
for shard in shards.iter() {
for (k, v) in shard.iter_enumerated() { for (k, v) in shard.iter_enumerated() {
if let Some(v) = v { if let Some(v) = v {
f(&k, &v.0, v.1); f(&k, &v.0, v.1);

View file

@ -124,7 +124,6 @@ impl<D: DepKind> QueryJob<D> {
} }
impl QueryJobId { impl QueryJobId {
#[cfg(not(parallel_compiler))]
pub(super) fn find_cycle_in_stack<D: DepKind>( pub(super) fn find_cycle_in_stack<D: DepKind>(
&self, &self,
query_map: QueryMap<D>, query_map: QueryMap<D>,

View file

@ -14,10 +14,11 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::HandleCycleError; use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::Lock;
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
use rustc_data_structures::{cold_path, sharded::Sharded}; use rustc_data_structures::{cold_path, sync};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError}; use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_span::{Span, DUMMY_SP}; use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell; use std::cell::Cell;
@ -30,10 +31,7 @@ use thin_vec::ThinVec;
use super::QueryConfig; use super::QueryConfig;
pub struct QueryState<K, D: DepKind> { pub struct QueryState<K, D: DepKind> {
#[cfg(parallel_compiler)]
active: Sharded<FxHashMap<K, QueryResult<D>>>, active: Sharded<FxHashMap<K, QueryResult<D>>>,
#[cfg(not(parallel_compiler))]
active: Lock<FxHashMap<K, QueryResult<D>>>,
} }
/// Indicates the state of a query for a given key in a query map. /// Indicates the state of a query for a given key in a query map.
@ -52,15 +50,7 @@ where
D: DepKind, D: DepKind,
{ {
pub fn all_inactive(&self) -> bool { pub fn all_inactive(&self) -> bool {
#[cfg(parallel_compiler)] self.active.lock_shards().all(|shard| shard.is_empty())
{
let shards = self.active.lock_shards();
shards.iter().all(|shard| shard.is_empty())
}
#[cfg(not(parallel_compiler))]
{
self.active.lock().is_empty()
}
} }
pub fn try_collect_active_jobs<Qcx: Copy>( pub fn try_collect_active_jobs<Qcx: Copy>(
@ -71,26 +61,10 @@ where
) -> Option<()> { ) -> Option<()> {
let mut active = Vec::new(); let mut active = Vec::new();
#[cfg(parallel_compiler)] // We use try_lock_shards here since we are called from the
{ // deadlock handler, and this shouldn't be locked.
// We use try_lock_shards here since we are called from the for shard in self.active.try_lock_shards() {
// deadlock handler, and this shouldn't be locked. for (k, v) in shard?.iter() {
let shards = self.active.try_lock_shards()?;
for shard in shards.iter() {
for (k, v) in shard.iter() {
if let QueryResult::Started(ref job) = *v {
active.push((*k, job.clone()));
}
}
}
}
#[cfg(not(parallel_compiler))]
{
// We use try_lock here since we are called from the
// deadlock handler, and this shouldn't be locked.
// (FIXME: Is this relevant for non-parallel compilers? It doesn't
// really hurt much.)
for (k, v) in self.active.try_lock()?.iter() {
if let QueryResult::Started(ref job) = *v { if let QueryResult::Started(ref job) = *v {
active.push((*k, job.clone())); active.push((*k, job.clone()));
} }
@ -184,10 +158,7 @@ where
cache.complete(key, result, dep_node_index); cache.complete(key, result, dep_node_index);
let job = { let job = {
#[cfg(parallel_compiler)]
let mut lock = state.active.get_shard_by_value(&key).lock(); let mut lock = state.active.get_shard_by_value(&key).lock();
#[cfg(not(parallel_compiler))]
let mut lock = state.active.lock();
match lock.remove(&key).unwrap() { match lock.remove(&key).unwrap() {
QueryResult::Started(job) => job, QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(), QueryResult::Poisoned => panic!(),
@ -209,10 +180,7 @@ where
// Poison the query so jobs waiting on it panic. // Poison the query so jobs waiting on it panic.
let state = self.state; let state = self.state;
let job = { let job = {
#[cfg(parallel_compiler)]
let mut shard = state.active.get_shard_by_value(&self.key).lock(); let mut shard = state.active.get_shard_by_value(&self.key).lock();
#[cfg(not(parallel_compiler))]
let mut shard = state.active.lock();
let job = match shard.remove(&self.key).unwrap() { let job = match shard.remove(&self.key).unwrap() {
QueryResult::Started(job) => job, QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(), QueryResult::Poisoned => panic!(),
@ -255,7 +223,6 @@ where
#[cold] #[cold]
#[inline(never)] #[inline(never)]
#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>( fn cycle_error<Q, Qcx>(
query: Q, query: Q,
qcx: Qcx, qcx: Qcx,
@ -336,10 +303,7 @@ where
Qcx: QueryContext, Qcx: QueryContext,
{ {
let state = query.query_state(qcx); let state = query.query_state(qcx);
#[cfg(parallel_compiler)]
let mut state_lock = state.active.get_shard_by_value(&key).lock(); let mut state_lock = state.active.get_shard_by_value(&key).lock();
#[cfg(not(parallel_compiler))]
let mut state_lock = state.active.lock();
// For the parallel compiler we need to check both the query cache and query state structures // For the parallel compiler we need to check both the query cache and query state structures
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@ -371,8 +335,18 @@ where
} }
Entry::Occupied(mut entry) => { Entry::Occupied(mut entry) => {
match entry.get_mut() { match entry.get_mut() {
#[cfg(not(parallel_compiler))]
QueryResult::Started(job) => { QueryResult::Started(job) => {
#[cfg(parallel_compiler)]
if sync::is_dyn_thread_safe() {
// Get the latch out
let latch = job.latch();
drop(state_lock);
// Only call `wait_for_query` if we're using a Rayon thread pool
// as it will attempt to mark the worker thread as blocked.
return wait_for_query(query, qcx, span, key, latch, current_job_id);
}
let id = job.id; let id = job.id;
drop(state_lock); drop(state_lock);
@ -380,14 +354,6 @@ where
// so we just return the error. // so we just return the error.
cycle_error(query, qcx, id, span) cycle_error(query, qcx, id, span)
} }
#[cfg(parallel_compiler)]
QueryResult::Started(job) => {
// Get the latch out
let latch = job.latch();
drop(state_lock);
wait_for_query(query, qcx, span, key, latch, current_job_id)
}
QueryResult::Poisoned => FatalError.raise(), QueryResult::Poisoned => FatalError.raise(),
} }
} }

View file

@ -17,10 +17,10 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::jobserver::{self, Client}; use rustc_data_structures::jobserver::{self, Client};
use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef}; use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef};
use rustc_data_structures::sync::{ use rustc_data_structures::sync::{
self, AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst, AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst,
}; };
use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter; use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter;
use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType}; use rustc_errors::emitter::{DynEmitter, EmitterWriter, HumanReadableErrorType};
use rustc_errors::json::JsonEmitter; use rustc_errors::json::JsonEmitter;
use rustc_errors::registry::Registry; use rustc_errors::registry::Registry;
use rustc_errors::{ use rustc_errors::{
@ -1251,7 +1251,7 @@ fn default_emitter(
source_map: Lrc<SourceMap>, source_map: Lrc<SourceMap>,
bundle: Option<Lrc<FluentBundle>>, bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: LazyFallbackBundle, fallback_bundle: LazyFallbackBundle,
) -> Box<dyn Emitter + sync::Send> { ) -> Box<DynEmitter> {
let macro_backtrace = sopts.unstable_opts.macro_backtrace; let macro_backtrace = sopts.unstable_opts.macro_backtrace;
let track_diagnostics = sopts.unstable_opts.track_diagnostics; let track_diagnostics = sopts.unstable_opts.track_diagnostics;
let terminal_url = match sopts.unstable_opts.terminal_urls { let terminal_url = match sopts.unstable_opts.terminal_urls {
@ -1717,12 +1717,12 @@ impl EarlyErrorHandler {
} }
} }
fn mk_emitter(output: ErrorOutputType) -> Box<dyn Emitter + sync::Send + 'static> { fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> {
// FIXME(#100717): early errors aren't translated at the moment, so this is fine, but it will // FIXME(#100717): early errors aren't translated at the moment, so this is fine, but it will
// need to reference every crate that might emit an early error for translation to work. // need to reference every crate that might emit an early error for translation to work.
let fallback_bundle = let fallback_bundle =
fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false); fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false);
let emitter: Box<dyn Emitter + sync::Send> = match output { let emitter: Box<DynEmitter> = match output {
config::ErrorOutputType::HumanReadable(kind) => { config::ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip(); let (short, color_config) = kind.unzip();
Box::new(EmitterWriter::stderr(color_config, fallback_bundle).short_message(short)) Box::new(EmitterWriter::stderr(color_config, fallback_bundle).short_message(short))

View file

@ -9,6 +9,9 @@ rustc_hir = { path = "../rustc_hir", optional = true }
rustc_middle = { path = "../rustc_middle", optional = true } rustc_middle = { path = "../rustc_middle", optional = true }
rustc_span = { path = "../rustc_span", optional = true } rustc_span = { path = "../rustc_span", optional = true }
rustc_target = { path = "../rustc_target", optional = true } rustc_target = { path = "../rustc_target", optional = true }
rustc_driver = { path = "../rustc_driver", optional = true }
rustc_interface = { path = "../rustc_interface", optional = true}
rustc_session = {path = "../rustc_session", optional = true}
tracing = "0.1" tracing = "0.1"
scoped-tls = "1.0" scoped-tls = "1.0"
@ -18,4 +21,7 @@ default = [
"rustc_middle", "rustc_middle",
"rustc_span", "rustc_span",
"rustc_target", "rustc_target",
"rustc_driver",
"rustc_interface",
"rustc_session",
] ]

View file

@ -4,13 +4,18 @@
//! until stable MIR is complete. //! until stable MIR is complete.
use std::fmt::Debug; use std::fmt::Debug;
use std::ops::Index;
use std::string::ToString; use std::string::ToString;
use crate::rustc_internal;
use crate::{ use crate::{
rustc_smir::Tables, rustc_smir::Tables,
stable_mir::{self, with}, stable_mir::{self, with},
}; };
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_interface::{interface, Queries};
use rustc_middle::ty::TyCtxt; use rustc_middle::ty::TyCtxt;
use rustc_session::EarlyErrorHandler;
pub use rustc_span::def_id::{CrateNum, DefId}; pub use rustc_span::def_id::{CrateNum, DefId};
fn with_tables<R>(mut f: impl FnMut(&mut Tables<'_>) -> R) -> R { fn with_tables<R>(mut f: impl FnMut(&mut Tables<'_>) -> R) -> R {
@ -20,7 +25,7 @@ fn with_tables<R>(mut f: impl FnMut(&mut Tables<'_>) -> R) -> R {
} }
pub fn item_def_id(item: &stable_mir::CrateItem) -> DefId { pub fn item_def_id(item: &stable_mir::CrateItem) -> DefId {
with_tables(|t| t.item_def_id(item)) with_tables(|t| t[item.0])
} }
pub fn crate_item(did: DefId) -> stable_mir::CrateItem { pub fn crate_item(did: DefId) -> stable_mir::CrateItem {
@ -67,23 +72,16 @@ pub fn impl_def(did: DefId) -> stable_mir::ty::ImplDef {
with_tables(|t| t.impl_def(did)) with_tables(|t| t.impl_def(did))
} }
impl<'tcx> Index<stable_mir::DefId> for Tables<'tcx> {
type Output = DefId;
#[inline(always)]
fn index(&self, index: stable_mir::DefId) -> &Self::Output {
&self.def_ids[index.0]
}
}
impl<'tcx> Tables<'tcx> { impl<'tcx> Tables<'tcx> {
pub fn item_def_id(&self, item: &stable_mir::CrateItem) -> DefId {
self.def_ids[item.0]
}
pub fn trait_def_id(&self, trait_def: &stable_mir::ty::TraitDef) -> DefId {
self.def_ids[trait_def.0]
}
pub fn impl_trait_def_id(&self, impl_def: &stable_mir::ty::ImplDef) -> DefId {
self.def_ids[impl_def.0]
}
pub fn generic_def_id(&self, generic_def: &stable_mir::ty::GenericDef) -> DefId {
self.def_ids[generic_def.0]
}
pub fn crate_item(&mut self, did: DefId) -> stable_mir::CrateItem { pub fn crate_item(&mut self, did: DefId) -> stable_mir::CrateItem {
stable_mir::CrateItem(self.create_def_id(did)) stable_mir::CrateItem(self.create_def_id(did))
} }
@ -140,12 +138,12 @@ impl<'tcx> Tables<'tcx> {
// FIXME: this becomes inefficient when we have too many ids // FIXME: this becomes inefficient when we have too many ids
for (i, &d) in self.def_ids.iter().enumerate() { for (i, &d) in self.def_ids.iter().enumerate() {
if d == did { if d == did {
return i; return stable_mir::DefId(i);
} }
} }
let id = self.def_ids.len(); let id = self.def_ids.len();
self.def_ids.push(did); self.def_ids.push(did);
id stable_mir::DefId(id)
} }
} }
@ -163,3 +161,40 @@ pub type Opaque = impl Debug + ToString + Clone;
pub(crate) fn opaque<T: Debug>(value: &T) -> Opaque { pub(crate) fn opaque<T: Debug>(value: &T) -> Opaque {
format!("{value:?}") format!("{value:?}")
} }
pub struct StableMir {
args: Vec<String>,
callback: fn(TyCtxt<'_>),
}
impl StableMir {
/// Creates a new `StableMir` instance from the given arguments and callback.
pub fn new(args: Vec<String>, callback: fn(TyCtxt<'_>)) -> Self {
StableMir { args, callback }
}
/// Runs the compiler against the given target and invokes the callback on the resulting `TyCtxt`
pub fn run(&mut self) {
rustc_driver::catch_fatal_errors(|| {
RunCompiler::new(&self.args.clone(), self).run().unwrap();
})
.unwrap();
}
}
impl Callbacks for StableMir {
/// Called after analysis. Return value instructs the compiler whether to
/// continue the compilation afterwards (defaults to `Compilation::Continue`)
fn after_analysis<'tcx>(
&mut self,
_handler: &EarlyErrorHandler,
_compiler: &interface::Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
queries.global_ctxt().unwrap().enter(|tcx| {
rustc_internal::run(tcx, || (self.callback)(tcx));
});
// No need to keep going.
Compilation::Stop
}
}
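For orientation, a minimal sketch of how this new driver could be invoked from a tool built on `rustc_smir` (the input file name, the `inspect` callback, and the use of `crate_name` are illustrative assumptions, not part of this change; the usual `rustc_private` boilerplate is omitted):

use rustc_middle::ty::TyCtxt;
use rustc_smir::rustc_internal::StableMir;
use rustc_span::def_id::LOCAL_CRATE;

fn inspect(tcx: TyCtxt<'_>) {
    // Runs inside `rustc_internal::run`, after analysis has finished.
    println!("analyzed crate: {}", tcx.crate_name(LOCAL_CRATE));
}

fn main() {
    // Arguments mirror a normal `rustc` invocation; the first element is the binary name.
    let args = vec!["rustc".to_string(), "example.rs".to_string()];
    StableMir::new(args, inspect).run();
}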

View file

@ -10,8 +10,8 @@
use crate::rustc_internal::{self, opaque}; use crate::rustc_internal::{self, opaque};
use crate::stable_mir::mir::{CopyNonOverlapping, UserTypeProjection, VariantIdx}; use crate::stable_mir::mir::{CopyNonOverlapping, UserTypeProjection, VariantIdx};
use crate::stable_mir::ty::{ use crate::stable_mir::ty::{
allocation_filter, new_allocation, Const, FloatTy, GenericDef, GenericParamDef, IntTy, allocation_filter, new_allocation, Const, FloatTy, GenericParamDef, IntTy, Movability, RigidTy,
Movability, RigidTy, TyKind, UintTy, TyKind, UintTy,
}; };
use crate::stable_mir::{self, Context}; use crate::stable_mir::{self, Context};
use rustc_hir as hir; use rustc_hir as hir;
@ -54,7 +54,7 @@ impl<'tcx> Context for Tables<'tcx> {
} }
fn trait_decl(&mut self, trait_def: &stable_mir::ty::TraitDef) -> stable_mir::ty::TraitDecl { fn trait_decl(&mut self, trait_def: &stable_mir::ty::TraitDef) -> stable_mir::ty::TraitDecl {
let def_id = self.trait_def_id(trait_def); let def_id = self[trait_def.0];
let trait_def = self.tcx.trait_def(def_id); let trait_def = self.tcx.trait_def(def_id);
trait_def.stable(self) trait_def.stable(self)
} }
@ -68,13 +68,13 @@ impl<'tcx> Context for Tables<'tcx> {
} }
fn trait_impl(&mut self, impl_def: &stable_mir::ty::ImplDef) -> stable_mir::ty::ImplTrait { fn trait_impl(&mut self, impl_def: &stable_mir::ty::ImplDef) -> stable_mir::ty::ImplTrait {
let def_id = self.impl_trait_def_id(impl_def); let def_id = self[impl_def.0];
let impl_trait = self.tcx.impl_trait_ref(def_id).unwrap(); let impl_trait = self.tcx.impl_trait_ref(def_id).unwrap();
impl_trait.stable(self) impl_trait.stable(self)
} }
fn mir_body(&mut self, item: &stable_mir::CrateItem) -> stable_mir::mir::Body { fn mir_body(&mut self, item: &stable_mir::CrateItem) -> stable_mir::mir::Body {
let def_id = self.item_def_id(item); let def_id = self[item.0];
let mir = self.tcx.optimized_mir(def_id); let mir = self.tcx.optimized_mir(def_id);
stable_mir::mir::Body { stable_mir::mir::Body {
blocks: mir blocks: mir
@ -102,19 +102,16 @@ impl<'tcx> Context for Tables<'tcx> {
ty.stable(self) ty.stable(self)
} }
fn generics_of(&mut self, generic_def: &GenericDef) -> stable_mir::ty::Generics { fn generics_of(&mut self, def_id: stable_mir::DefId) -> stable_mir::ty::Generics {
let def_id = self.generic_def_id(generic_def); let def_id = self[def_id];
let generic_def = self.tcx.generics_of(def_id); let generics = self.tcx.generics_of(def_id);
generic_def.stable(self) generics.stable(self)
} }
fn predicates_of( fn predicates_of(&mut self, def_id: stable_mir::DefId) -> stable_mir::ty::GenericPredicates {
&mut self, let def_id = self[def_id];
trait_def: &stable_mir::ty::TraitDef, let ty::GenericPredicates { parent, predicates } = self.tcx.predicates_of(def_id);
) -> stable_mir::GenericPredicates { stable_mir::ty::GenericPredicates {
let trait_def_id = self.trait_def_id(trait_def);
let ty::GenericPredicates { parent, predicates } = self.tcx.predicates_of(trait_def_id);
stable_mir::GenericPredicates {
parent: parent.map(|did| self.trait_def(did)), parent: parent.map(|did| self.trait_def(did)),
predicates: predicates predicates: predicates
.iter() .iter()

View file

@ -13,11 +13,10 @@
use std::cell::Cell; use std::cell::Cell;
use crate::rustc_smir::Tables;
use self::ty::{ use self::ty::{
GenericDef, Generics, ImplDef, ImplTrait, PredicateKind, Span, TraitDecl, TraitDef, Ty, TyKind, GenericPredicates, Generics, ImplDef, ImplTrait, Span, TraitDecl, TraitDef, Ty, TyKind,
}; };
use crate::rustc_smir::Tables;
pub mod mir; pub mod mir;
pub mod ty; pub mod ty;
@ -29,7 +28,8 @@ pub type Symbol = String;
pub type CrateNum = usize; pub type CrateNum = usize;
/// A unique identification number for each item accessible for the current compilation unit. /// A unique identification number for each item accessible for the current compilation unit.
pub type DefId = usize; #[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct DefId(pub(crate) usize);
/// A list of crate items. /// A list of crate items.
pub type CrateItems = Vec<CrateItem>; pub type CrateItems = Vec<CrateItem>;
@ -40,12 +40,6 @@ pub type TraitDecls = Vec<TraitDef>;
/// A list of impl trait decls. /// A list of impl trait decls.
pub type ImplTraitDecls = Vec<ImplDef>; pub type ImplTraitDecls = Vec<ImplDef>;
/// A list of predicates.
pub struct GenericPredicates {
pub parent: Option<TraitDef>,
pub predicates: Vec<(PredicateKind, Span)>,
}
/// Holds information about a crate. /// Holds information about a crate.
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
pub struct Crate { pub struct Crate {
@ -109,14 +103,6 @@ pub fn trait_impl(trait_impl: &ImplDef) -> ImplTrait {
with(|cx| cx.trait_impl(trait_impl)) with(|cx| cx.trait_impl(trait_impl))
} }
pub fn generics_of(generic_def: &GenericDef) -> Generics {
with(|cx| cx.generics_of(generic_def))
}
pub fn predicates_of(trait_def: &TraitDef) -> GenericPredicates {
with(|cx| cx.predicates_of(trait_def))
}
pub trait Context { pub trait Context {
fn entry_fn(&mut self) -> Option<CrateItem>; fn entry_fn(&mut self) -> Option<CrateItem>;
/// Retrieve all items of the local crate that have a MIR associated with them. /// Retrieve all items of the local crate that have a MIR associated with them.
@ -126,8 +112,8 @@ pub trait Context {
fn trait_decl(&mut self, trait_def: &TraitDef) -> TraitDecl; fn trait_decl(&mut self, trait_def: &TraitDef) -> TraitDecl;
fn all_trait_impls(&mut self) -> ImplTraitDecls; fn all_trait_impls(&mut self) -> ImplTraitDecls;
fn trait_impl(&mut self, trait_impl: &ImplDef) -> ImplTrait; fn trait_impl(&mut self, trait_impl: &ImplDef) -> ImplTrait;
fn generics_of(&mut self, generic_def: &GenericDef) -> Generics; fn generics_of(&mut self, def_id: DefId) -> Generics;
fn predicates_of(&mut self, trait_def: &TraitDef) -> GenericPredicates; fn predicates_of(&mut self, def_id: DefId) -> GenericPredicates;
/// Get information about the local crate. /// Get information about the local crate.
fn local_crate(&self) -> Crate; fn local_crate(&self) -> Crate;
/// Retrieve a list of all external crates. /// Retrieve a list of all external crates.

View file

@ -120,27 +120,9 @@ pub struct GenericDef(pub(crate) DefId);
#[derive(Clone, Copy, PartialEq, Eq, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct ConstDef(pub(crate) DefId); pub struct ConstDef(pub(crate) DefId);
impl TraitDef {
pub fn trait_decl(&self) -> TraitDecl {
with(|cx| cx.trait_decl(self))
}
}
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
pub struct ImplDef(pub(crate) DefId); pub struct ImplDef(pub(crate) DefId);
impl ImplDef {
pub fn trait_impl(&self) -> ImplTrait {
with(|cx| cx.trait_impl(self))
}
}
impl GenericDef {
pub fn generics_of(&self) -> Generics {
with(|tcx| tcx.generics_of(self))
}
}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct GenericArgs(pub Vec<GenericArgKind>); pub struct GenericArgs(pub Vec<GenericArgKind>);
@ -463,6 +445,16 @@ pub struct TraitDecl {
pub deny_explicit_impl: bool, pub deny_explicit_impl: bool,
} }
impl TraitDecl {
pub fn generics_of(&self) -> Generics {
with(|cx| cx.generics_of(self.def_id.0))
}
pub fn predicates_of(&self) -> GenericPredicates {
with(|cx| cx.predicates_of(self.def_id.0))
}
}
pub type ImplTrait = EarlyBinder<TraitRef>; pub type ImplTrait = EarlyBinder<TraitRef>;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -499,8 +491,8 @@ pub struct GenericParamDef {
} }
pub struct GenericPredicates { pub struct GenericPredicates {
pub parent: Option<DefId>, pub parent: Option<TraitDef>,
pub predicates: Vec<PredicateKind>, pub predicates: Vec<(PredicateKind, Span)>,
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]

View file

@ -179,20 +179,52 @@ pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
} }
} }
pub fn deployment_target(target: &Target) -> Option<String> { pub fn sdk_version(platform: u32) -> Option<(u32, u32)> {
// NOTE: These values are from an arbitrary point in time but shouldn't make it into the final
// binary since the final link command will have the current SDK version passed to it.
match platform {
object::macho::PLATFORM_MACOS => Some((13, 1)),
object::macho::PLATFORM_IOS
| object::macho::PLATFORM_IOSSIMULATOR
| object::macho::PLATFORM_TVOS
| object::macho::PLATFORM_TVOSSIMULATOR
| object::macho::PLATFORM_MACCATALYST => Some((16, 2)),
object::macho::PLATFORM_WATCHOS | object::macho::PLATFORM_WATCHOSSIMULATOR => Some((9, 1)),
_ => None,
}
}
pub fn platform(target: &Target) -> Option<u32> {
Some(match (&*target.os, &*target.abi) {
("macos", _) => object::macho::PLATFORM_MACOS,
("ios", "macabi") => object::macho::PLATFORM_MACCATALYST,
("ios", "sim") => object::macho::PLATFORM_IOSSIMULATOR,
("ios", _) => object::macho::PLATFORM_IOS,
("watchos", "sim") => object::macho::PLATFORM_WATCHOSSIMULATOR,
("watchos", _) => object::macho::PLATFORM_WATCHOS,
("tvos", "sim") => object::macho::PLATFORM_TVOSSIMULATOR,
("tvos", _) => object::macho::PLATFORM_TVOS,
_ => return None,
})
}
pub fn deployment_target(target: &Target) -> Option<(u32, u32)> {
let (major, minor) = match &*target.os { let (major, minor) = match &*target.os {
"macos" => { "macos" => {
// This does not need to be specific. It just needs to handle x86 vs M1. // This does not need to be specific. It just needs to handle x86 vs M1.
let arch = if target.arch == "x86" || target.arch == "x86_64" { X86_64 } else { Arm64 }; let arch = if target.arch == "x86" || target.arch == "x86_64" { X86_64 } else { Arm64 };
macos_deployment_target(arch) macos_deployment_target(arch)
} }
"ios" => ios_deployment_target(), "ios" => match &*target.abi {
"macabi" => mac_catalyst_deployment_target(),
_ => ios_deployment_target(),
},
"watchos" => watchos_deployment_target(), "watchos" => watchos_deployment_target(),
"tvos" => tvos_deployment_target(), "tvos" => tvos_deployment_target(),
_ => return None, _ => return None,
}; };
Some(format!("{major}.{minor}")) Some((major, minor))
} }
fn from_set_deployment_target(var_name: &str) -> Option<(u32, u32)> { fn from_set_deployment_target(var_name: &str) -> Option<(u32, u32)> {
@ -274,6 +306,11 @@ fn ios_deployment_target() -> (u32, u32) {
from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0)) from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
} }
fn mac_catalyst_deployment_target() -> (u32, u32) {
// If you are looking for the default deployment target, prefer `rustc --print deployment-target`.
from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((14, 0))
}
pub fn ios_llvm_target(arch: Arch) -> String { pub fn ios_llvm_target(arch: Arch) -> String {
// Modern iOS tooling extracts information about deployment target // Modern iOS tooling extracts information about deployment target
// from LC_BUILD_VERSION. This load command will only be emitted when // from LC_BUILD_VERSION. This load command will only be emitted when

View file

@ -61,6 +61,8 @@ mod aix_base;
mod android_base; mod android_base;
mod apple_base; mod apple_base;
pub use apple_base::deployment_target as current_apple_deployment_target; pub use apple_base::deployment_target as current_apple_deployment_target;
pub use apple_base::platform as current_apple_platform;
pub use apple_base::sdk_version as current_apple_sdk_version;
mod avr_gnu_base; mod avr_gnu_base;
pub use avr_gnu_base::ef_avr_arch; pub use avr_gnu_base::ef_avr_arch;
mod bpf_base; mod bpf_base;

View file

@ -53,7 +53,7 @@ impl<'tcx> SearchGraph<'tcx> {
pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> { pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> {
Self { Self {
mode, mode,
local_overflow_limit: tcx.recursion_limit().0.ilog2() as usize, local_overflow_limit: tcx.recursion_limit().0.checked_ilog2().unwrap_or(0) as usize,
stack: Default::default(), stack: Default::default(),
provisional_cache: ProvisionalCache::empty(), provisional_cache: ProvisionalCache::empty(),
} }

View file

@ -152,16 +152,14 @@ fn with_fresh_ty_vars<'cx, 'tcx>(
.predicates_of(impl_def_id) .predicates_of(impl_def_id)
.instantiate(tcx, impl_args) .instantiate(tcx, impl_args)
.iter() .iter()
.map(|(c, s)| (c.as_predicate(), s)) .map(|(c, _)| c.as_predicate())
.collect(), .collect(),
}; };
let InferOk { value: mut header, obligations } = selcx let InferOk { value: mut header, obligations } =
.infcx selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(header);
.at(&ObligationCause::dummy_with_span(tcx.def_span(impl_def_id)), param_env)
.normalize(header);
header.predicates.extend(obligations.into_iter().map(|o| (o.predicate, o.cause.span))); header.predicates.extend(obligations.into_iter().map(|o| o.predicate));
header header
} }
@ -261,17 +259,11 @@ fn overlap<'tcx>(
infcx.tcx.def_span(impl2_header.impl_def_id), infcx.tcx.def_span(impl2_header.impl_def_id),
"the second impl is here", "the second impl is here",
); );
if !failing_obligation.cause.span.is_dummy() { lint.note(format!(
lint.span_label( "`{}` may be considered to hold in future releases, \
failing_obligation.cause.span, causing the impls to overlap",
format!( infcx.resolve_vars_if_possible(failing_obligation.predicate)
"`{}` may be considered to hold in future releases, \ ));
causing the impls to overlap",
infcx
.resolve_vars_if_possible(failing_obligation.predicate)
),
);
}
lint lint
}, },
); );
@ -355,8 +347,8 @@ fn impl_intersection_has_impossible_obligation<'cx, 'tcx>(
[&impl1_header.predicates, &impl2_header.predicates] [&impl1_header.predicates, &impl2_header.predicates]
.into_iter() .into_iter()
.flatten() .flatten()
.map(|&(predicate, span)| { .map(|&predicate| {
Obligation::new(infcx.tcx, ObligationCause::dummy_with_span(span), param_env, predicate) Obligation::new(infcx.tcx, ObligationCause::dummy(), param_env, predicate)
}) })
.chain(obligations.into_iter().cloned()) .chain(obligations.into_iter().cloned())
.find(|obligation: &PredicateObligation<'tcx>| { .find(|obligation: &PredicateObligation<'tcx>| {

View file

@ -592,13 +592,13 @@ fn make_thin_self_ptr<'tcx>(
for i in 0..fat_pointer_layout.fields.count() { for i in 0..fat_pointer_layout.fields.count() {
let field_layout = fat_pointer_layout.field(cx, i); let field_layout = fat_pointer_layout.field(cx, i);
if !field_layout.is_zst() { if !field_layout.is_1zst() {
fat_pointer_layout = field_layout; fat_pointer_layout = field_layout;
continue 'descend_newtypes; continue 'descend_newtypes;
} }
} }
bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout); bug!("receiver has no non-1-ZST fields {:?}", fat_pointer_layout);
} }
fat_pointer_layout.ty fat_pointer_layout.ty

View file

@ -192,7 +192,7 @@ fn layout_of_uncached<'tcx>(
let metadata_layout = cx.layout_of(metadata_ty)?; let metadata_layout = cx.layout_of(metadata_ty)?;
// If the metadata is a 1-zst, then the pointer is thin. // If the metadata is a 1-zst, then the pointer is thin.
if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 { if metadata_layout.is_1zst() {
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr))); return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
} }

View file

@ -1218,6 +1218,15 @@ impl<T, A: Allocator> Vec<T, A> {
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`]. /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
/// ///
/// This method guarantees that, for the purposes of the aliasing model, it does not
/// materialize a reference to the underlying slice, and thus the returned pointer
/// will remain valid when mixed with other calls to [`as_ptr`] and [`as_mut_ptr`].
/// Note that calling other methods that materialize mutable references to the slice,
/// or mutable references to specific elements you are planning on accessing through this pointer,
/// as well as writing to those elements, may still invalidate this pointer.
/// See the second example below for how this guarantee can be used.
///
///
/// # Examples /// # Examples
/// ///
/// ``` /// ```
@ -1231,7 +1240,23 @@ impl<T, A: Allocator> Vec<T, A> {
/// } /// }
/// ``` /// ```
/// ///
/// Due to the aliasing guarantee, the following code is legal:
///
/// ```rust
/// unsafe {
/// let mut v = vec![0, 1, 2];
/// let ptr1 = v.as_ptr();
/// let _ = ptr1.read();
/// let ptr2 = v.as_mut_ptr().offset(2);
/// ptr2.write(2);
/// // Notably, the write to `ptr2` did *not* invalidate `ptr1`
/// // because it mutated a different element:
/// let _ = ptr1.read();
/// }
/// ```
///
/// [`as_mut_ptr`]: Vec::as_mut_ptr /// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")] #[stable(feature = "vec_as_ptr", since = "1.37.0")]
#[inline] #[inline]
pub fn as_ptr(&self) -> *const T { pub fn as_ptr(&self) -> *const T {
@ -1248,6 +1273,15 @@ impl<T, A: Allocator> Vec<T, A> {
/// Modifying the vector may cause its buffer to be reallocated, /// Modifying the vector may cause its buffer to be reallocated,
/// which would also make any pointers to it invalid. /// which would also make any pointers to it invalid.
/// ///
/// This method guarantees that, for the purposes of the aliasing model, it does not
/// materialize a reference to the underlying slice, and thus the returned pointer
/// will remain valid when mixed with other calls to [`as_ptr`] and [`as_mut_ptr`].
/// Note that calling other methods that materialize references to the slice,
/// or references to specific elements you are planning on accessing through this pointer,
/// may still invalidate this pointer.
/// See the second example below for how this guarantee can be used.
///
///
/// # Examples /// # Examples
/// ///
/// ``` /// ```
@ -1265,6 +1299,23 @@ impl<T, A: Allocator> Vec<T, A> {
/// } /// }
/// assert_eq!(&*x, &[0, 1, 2, 3]); /// assert_eq!(&*x, &[0, 1, 2, 3]);
/// ``` /// ```
///
/// Due to the aliasing guarantee, the following code is legal:
///
/// ```rust
/// unsafe {
/// let mut v = vec![0];
/// let ptr1 = v.as_mut_ptr();
/// ptr1.write(1);
/// let ptr2 = v.as_mut_ptr();
/// ptr2.write(2);
/// // Notably, the write to `ptr2` did *not* invalidate `ptr1`:
/// ptr1.write(3);
/// }
/// ```
///
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")] #[stable(feature = "vec_as_ptr", since = "1.37.0")]
#[inline] #[inline]
pub fn as_mut_ptr(&mut self) -> *mut T { pub fn as_mut_ptr(&mut self) -> *mut T {

View file

@ -237,6 +237,7 @@
use crate::cmp::Ordering; use crate::cmp::Ordering;
use crate::fmt::{self, Debug, Display}; use crate::fmt::{self, Debug, Display};
use crate::intrinsics::is_nonoverlapping;
use crate::marker::{PhantomData, Unsize}; use crate::marker::{PhantomData, Unsize};
use crate::mem; use crate::mem;
use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn}; use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn};
@ -415,6 +416,12 @@ impl<T> Cell<T> {
/// Swaps the values of two `Cell`s. /// Swaps the values of two `Cell`s.
/// Difference with `std::mem::swap` is that this function doesn't require `&mut` reference. /// Difference with `std::mem::swap` is that this function doesn't require `&mut` reference.
/// ///
/// # Panics
///
/// This function will panic if `self` and `other` are different `Cell`s that partially overlap.
/// (Using just standard library methods, it is impossible to create such partially overlapping `Cell`s.
/// However, unsafe code is allowed to e.g. create two `&Cell<[i32; 2]>` that partially overlap.)
///
/// # Examples /// # Examples
/// ///
/// ``` /// ```
@ -430,14 +437,20 @@ impl<T> Cell<T> {
#[stable(feature = "move_cell", since = "1.17.0")] #[stable(feature = "move_cell", since = "1.17.0")]
pub fn swap(&self, other: &Self) { pub fn swap(&self, other: &Self) {
if ptr::eq(self, other) { if ptr::eq(self, other) {
// Swapping wouldn't change anything.
return; return;
} }
if !is_nonoverlapping(self, other, 1) {
// See <https://github.com/rust-lang/rust/issues/80778> for why we need to stop here.
panic!("`Cell::swap` on overlapping non-identical `Cell`s");
}
// SAFETY: This can be risky if called from separate threads, but `Cell` // SAFETY: This can be risky if called from separate threads, but `Cell`
// is `!Sync` so this won't happen. This also won't invalidate any // is `!Sync` so this won't happen. This also won't invalidate any
// pointers since `Cell` makes sure nothing else will be pointing into // pointers since `Cell` makes sure nothing else will be pointing into
// either of these `Cell`s. // either of these `Cell`s. We also excluded shenanigans like partially overlapping `Cell`s,
// so `swap` will just properly copy two full values of type `T` back and forth.
unsafe { unsafe {
ptr::swap(self.value.get(), other.value.get()); mem::swap(&mut *self.value.get(), &mut *other.value.get());
} }
} }
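As a small illustration of the non-overlapping case (a sketch using the existing `Cell::from_mut` and `as_slice_of_cells` APIs), distinct element cells carved out of one slice swap normally; the new panic only concerns partially overlapping cells, which safe code cannot construct:

use std::cell::Cell;

fn main() {
    let slice: &mut [i32] = &mut [0, 1, 2];
    // Distinct, non-overlapping cells over the same backing slice.
    let cells: &[Cell<i32>] = Cell::from_mut(slice).as_slice_of_cells();
    cells[0].swap(&cells[2]);
    assert_eq!((cells[0].get(), cells[2].get()), (2, 0));
}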

View file

@ -2567,7 +2567,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
let size = mem::size_of::<T>() let size = mem::size_of::<T>()
.checked_mul(count) .checked_mul(count)
.expect("is_nonoverlapping: `size_of::<T>() * count` overflows a usize"); .expect("is_nonoverlapping: `size_of::<T>() * count` overflows a usize");
let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize }; let diff = src_usize.abs_diff(dst_usize);
// If the absolute distance between the ptrs is at least as big as the size of the buffer, // If the absolute distance between the ptrs is at least as big as the size of the buffer,
// they do not overlap. // they do not overlap.
diff >= size diff >= size
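A quick worked instance of the check, with hypothetical addresses:

// src = 0x1000, dst = 0x1008, T = u32, count = 2
// size = 4 * 2 = 8, diff = |0x1008 - 0x1000| = 8
// diff >= size, so the regions are exactly adjacent and do not overlap.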

View file

@ -129,6 +129,11 @@ pub unsafe extern "C" fn runtime_entry(
abi::exit(result); abi::exit(result);
} }
#[inline]
pub(crate) fn is_interrupted(errno: i32) -> bool {
errno == abi::errno::EINTR
}
pub fn decode_error_kind(errno: i32) -> ErrorKind { pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno { match errno {
abi::errno::EACCES => ErrorKind::PermissionDenied, abi::errno::EACCES => ErrorKind::PermissionDenied,

View file

@ -104,16 +104,15 @@ def _download(path, url, probably_big, verbose, exception):
# If curl is not present on Win32, we should not sys.exit # If curl is not present on Win32, we should not sys.exit
# but raise `CalledProcessError` or `OSError` instead # but raise `CalledProcessError` or `OSError` instead
require(["curl", "--version"], exception=platform_is_win32()) require(["curl", "--version"], exception=platform_is_win32())
with open(path, "wb") as outfile: run(["curl", option,
run(["curl", option, "-L", # Follow redirect.
"-L", # Follow redirect. "-y", "30", "-Y", "10", # timeout if speed is < 10 bytes/sec for > 30 seconds
"-y", "30", "-Y", "10", # timeout if speed is < 10 bytes/sec for > 30 seconds "--connect-timeout", "30", # timeout if cannot connect within 30 seconds
"--connect-timeout", "30", # timeout if cannot connect within 30 seconds "-o", path,
"--retry", "3", "-SRf", url], "--retry", "3", "-SRf", url],
stdout=outfile, #Implements cli redirect operator '>' verbose=verbose,
verbose=verbose, exception=True, # Will raise RuntimeError on failure
exception=True, # Will raise RuntimeError on failure )
)
except (subprocess.CalledProcessError, OSError, RuntimeError): except (subprocess.CalledProcessError, OSError, RuntimeError):
# see http://serverfault.com/questions/301128/how-to-download # see http://serverfault.com/questions/301128/how-to-download
if platform_is_win32(): if platform_is_win32():

View file

@ -225,6 +225,8 @@ impl Config {
"10", // timeout if speed is < 10 bytes/sec for > 30 seconds "10", // timeout if speed is < 10 bytes/sec for > 30 seconds
"--connect-timeout", "--connect-timeout",
"30", // timeout if cannot connect within 30 seconds "30", // timeout if cannot connect within 30 seconds
"-o",
tempfile.to_str().unwrap(),
"--retry", "--retry",
"3", "3",
"-SRf", "-SRf",
@ -236,8 +238,6 @@ impl Config {
curl.arg("--progress-bar"); curl.arg("--progress-bar");
} }
curl.arg(url); curl.arg(url);
let f = File::create(tempfile).unwrap();
curl.stdout(Stdio::from(f));
if !self.check_run(&mut curl) { if !self.check_run(&mut curl) {
if self.build.contains("windows-msvc") { if self.build.contains("windows-msvc") {
eprintln!("Fallback to PowerShell"); eprintln!("Fallback to PowerShell");

View file

@ -263,7 +263,7 @@ flavor. Valid options are:
* `lld-link`: use the LLVM `lld` executable with the [`-flavor link` * `lld-link`: use the LLVM `lld` executable with the [`-flavor link`
flag][lld-flavor] for Microsoft's `link.exe`. flag][lld-flavor] for Microsoft's `link.exe`.
[lld-flavor]: https://lld.llvm.org/Driver.html [lld-flavor]: https://releases.llvm.org/12.0.0/tools/lld/docs/Driver.html
## linker-plugin-lto ## linker-plugin-lto

View file

@ -1,7 +1,7 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::{self, Lrc}; use rustc_data_structures::sync::Lrc;
use rustc_data_structures::unord::UnordSet; use rustc_data_structures::unord::UnordSet;
use rustc_errors::emitter::{Emitter, EmitterWriter}; use rustc_errors::emitter::{DynEmitter, EmitterWriter};
use rustc_errors::json::JsonEmitter; use rustc_errors::json::JsonEmitter;
use rustc_errors::TerminalUrl; use rustc_errors::TerminalUrl;
use rustc_feature::UnstableFeatures; use rustc_feature::UnstableFeatures;
@ -133,7 +133,7 @@ pub(crate) fn new_handler(
rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(), rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(),
false, false,
); );
let emitter: Box<dyn Emitter + sync::Send> = match error_format { let emitter: Box<DynEmitter> = match error_format {
ErrorOutputType::HumanReadable(kind) => { ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip(); let (short, color_config) = kind.unzip();
Box::new( Box::new(

@ -1 +1 @@
Subproject commit 925280f028db3a322935e040719a0754703947cf Subproject commit 96fe1c9e1aecd8f57063e3753969bb6418fd2fd5

View file

@ -6,18 +6,105 @@ document.
## Unreleased / Beta / In Rust Nightly ## Unreleased / Beta / In Rust Nightly
[435a8ad8...master](https://github.com/rust-lang/rust-clippy/compare/435a8ad8...master) [37f4c172...master](https://github.com/rust-lang/rust-clippy/compare/37f4c172...master)
## Rust 1.72
Current stable, released 2023-08-24
[View all 131 merged pull requests](https://github.com/rust-lang/rust-clippy/pulls?q=merged%3A2023-05-22T14%3A53%3A59Z..2023-07-01T22%3A57%3A20Z+base%3Amaster)
### New Lints
* [`manual_try_fold`]
[#11012](https://github.com/rust-lang/rust-clippy/pull/11012)
* [`tuple_array_conversions`]
[#11020](https://github.com/rust-lang/rust-clippy/pull/11020)
* [`redundant_at_rest_pattern`]
[#11013](https://github.com/rust-lang/rust-clippy/pull/11013)
* [`needless_pub_self`]
[#10967](https://github.com/rust-lang/rust-clippy/pull/10967)
* [`pub_with_shorthand`]
[#10967](https://github.com/rust-lang/rust-clippy/pull/10967)
* [`pub_without_shorthand`]
[#10967](https://github.com/rust-lang/rust-clippy/pull/10967)
* [`manual_range_patterns`]
[#10968](https://github.com/rust-lang/rust-clippy/pull/10968)
* [`needless_raw_string_hashes`]
[#10884](https://github.com/rust-lang/rust-clippy/pull/10884)
* [`needless_raw_strings`]
[#10884](https://github.com/rust-lang/rust-clippy/pull/10884)
* [`incorrect_clone_impl_on_copy_type`]
[#10925](https://github.com/rust-lang/rust-clippy/pull/10925)
* [`drain_collect`]
[#10835](https://github.com/rust-lang/rust-clippy/pull/10835)
* [`single_range_in_vec_init`]
[#10934](https://github.com/rust-lang/rust-clippy/pull/10934)
* [`unnecessary_literal_unwrap`]
[#10358](https://github.com/rust-lang/rust-clippy/pull/10358)
* [`large_stack_frames`]
[#10827](https://github.com/rust-lang/rust-clippy/pull/10827)
* [`min_ident_chars`]
[#10916](https://github.com/rust-lang/rust-clippy/pull/10916)
* [`needless_if`]
[#10921](https://github.com/rust-lang/rust-clippy/pull/10921)
* [`excessive_nesting`]
[#10672](https://github.com/rust-lang/rust-clippy/pull/10672)
* [`arc_with_non_send_sync`]
[#10898](https://github.com/rust-lang/rust-clippy/pull/10898)
* [`redundant_type_annotations`]
[#10570](https://github.com/rust-lang/rust-clippy/pull/10570)
* [`host_endian_bytes`]
[#10826](https://github.com/rust-lang/rust-clippy/pull/10826)
* [`little_endian_bytes`]
[#10826](https://github.com/rust-lang/rust-clippy/pull/10826)
* [`big_endian_bytes`]
[#10826](https://github.com/rust-lang/rust-clippy/pull/10826)
* [`ptr_cast_constness`]
[#10779](https://github.com/rust-lang/rust-clippy/pull/10779)
* [`needless_else`]
[#10810](https://github.com/rust-lang/rust-clippy/pull/10810)
### Moves and Deprecations
* Moved [`redundant_clone`] to `nursery` (Now allow-by-default)
[#10873](https://github.com/rust-lang/rust-clippy/pull/10873)
### Enhancements
* [`undocumented_unsafe_blocks`]: Added [`accept-comment-above-attributes`] configuration
[#10986](https://github.com/rust-lang/rust-clippy/pull/10986)
* [`undocumented_unsafe_blocks`]: Added [`accept-comment-above-statement`] configuration.
[#10886](https://github.com/rust-lang/rust-clippy/pull/10886)
* [`missing_panics_doc`]: No longer lints on `todo!()`
[#10976](https://github.com/rust-lang/rust-clippy/pull/10976)
* [`module_inception`]: Added `allow-private-module-inception` configuration.
[#10917](https://github.com/rust-lang/rust-clippy/pull/10917)
* Errors and warnings generated while parsing `clippy.toml` now point to the location in the TOML
file the error/warning occurred in.
[#10607](https://github.com/rust-lang/rust-clippy/pull/10607)
### False Positive Fixes
* [`excessive_precision`]: No longer lints overflowing literals
[#10952](https://github.com/rust-lang/rust-clippy/pull/10952)
### Suggestion Fixes/Improvements
* [`option_map_unwrap_or`]: The suggestion now considers the set [`msrv`] config value
[#11030](https://github.com/rust-lang/rust-clippy/pull/11030)
### Documentation Improvements
* [Clippy's lint list] now stores filter parameters in the URL, to allow easy sharing
[#10834](https://github.com/rust-lang/rust-clippy/pull/10834)
## Rust 1.71 ## Rust 1.71
Current stable, released 2023-07-13 Released 2023-07-13
<!-- FIXME: Remove the request for feedback, with the next changelog --> Note: Clippy will use a shorter changelog format from now on; if you want a detailed list of
all changes, please check out the list of merged pull requests.
We're trying out a new shorter changelog format, that only contains significant changes.
You can check out the list of merged pull requests for a list of all changes.
If you have any feedback related to the new format, please share it in
[#10847](https://github.com/rust-lang/rust-clippy/issues/10847)
[View all 78 merged pull requests](https://github.com/rust-lang/rust-clippy/pulls?q=merged%3A2023-04-11T20%3A05%3A26Z..2023-05-20T13%3A48%3A17Z+base%3Amaster) [View all 78 merged pull requests](https://github.com/rust-lang/rust-clippy/pulls?q=merged%3A2023-04-11T20%3A05%3A26Z..2023-05-20T13%3A48%3A17Z+base%3Amaster)
@ -4677,6 +4764,7 @@ Released 2018-09-13
[pull3665]: https://github.com/rust-lang/rust-clippy/pull/3665 [pull3665]: https://github.com/rust-lang/rust-clippy/pull/3665
[adding_lints]: https://github.com/rust-lang/rust-clippy/blob/master/book/src/development/adding_lints.md [adding_lints]: https://github.com/rust-lang/rust-clippy/blob/master/book/src/development/adding_lints.md
[`README.md`]: https://github.com/rust-lang/rust-clippy/blob/master/README.md [`README.md`]: https://github.com/rust-lang/rust-clippy/blob/master/README.md
[Clippy's lint list]: https://rust-lang.github.io/rust-clippy/master/index.html
<!-- lint disable no-unused-definitions --> <!-- lint disable no-unused-definitions -->
<!-- begin autogenerated links to lint list --> <!-- begin autogenerated links to lint list -->
@ -4897,6 +4985,7 @@ Released 2018-09-13
[`implicit_return`]: https://rust-lang.github.io/rust-clippy/master/index.html#implicit_return [`implicit_return`]: https://rust-lang.github.io/rust-clippy/master/index.html#implicit_return
[`implicit_saturating_add`]: https://rust-lang.github.io/rust-clippy/master/index.html#implicit_saturating_add [`implicit_saturating_add`]: https://rust-lang.github.io/rust-clippy/master/index.html#implicit_saturating_add
[`implicit_saturating_sub`]: https://rust-lang.github.io/rust-clippy/master/index.html#implicit_saturating_sub [`implicit_saturating_sub`]: https://rust-lang.github.io/rust-clippy/master/index.html#implicit_saturating_sub
[`implied_bounds_in_impls`]: https://rust-lang.github.io/rust-clippy/master/index.html#implied_bounds_in_impls
[`impossible_comparisons`]: https://rust-lang.github.io/rust-clippy/master/index.html#impossible_comparisons [`impossible_comparisons`]: https://rust-lang.github.io/rust-clippy/master/index.html#impossible_comparisons
[`imprecise_flops`]: https://rust-lang.github.io/rust-clippy/master/index.html#imprecise_flops [`imprecise_flops`]: https://rust-lang.github.io/rust-clippy/master/index.html#imprecise_flops
[`inconsistent_digit_grouping`]: https://rust-lang.github.io/rust-clippy/master/index.html#inconsistent_digit_grouping [`inconsistent_digit_grouping`]: https://rust-lang.github.io/rust-clippy/master/index.html#inconsistent_digit_grouping
@ -5211,6 +5300,7 @@ Released 2018-09-13
[`regex_macro`]: https://rust-lang.github.io/rust-clippy/master/index.html#regex_macro [`regex_macro`]: https://rust-lang.github.io/rust-clippy/master/index.html#regex_macro
[`repeat_once`]: https://rust-lang.github.io/rust-clippy/master/index.html#repeat_once [`repeat_once`]: https://rust-lang.github.io/rust-clippy/master/index.html#repeat_once
[`replace_consts`]: https://rust-lang.github.io/rust-clippy/master/index.html#replace_consts [`replace_consts`]: https://rust-lang.github.io/rust-clippy/master/index.html#replace_consts
[`reserve_after_initialization`]: https://rust-lang.github.io/rust-clippy/master/index.html#reserve_after_initialization
[`rest_pat_in_fully_bound_structs`]: https://rust-lang.github.io/rust-clippy/master/index.html#rest_pat_in_fully_bound_structs [`rest_pat_in_fully_bound_structs`]: https://rust-lang.github.io/rust-clippy/master/index.html#rest_pat_in_fully_bound_structs
[`result_expect_used`]: https://rust-lang.github.io/rust-clippy/master/index.html#result_expect_used [`result_expect_used`]: https://rust-lang.github.io/rust-clippy/master/index.html#result_expect_used
[`result_large_err`]: https://rust-lang.github.io/rust-clippy/master/index.html#result_large_err [`result_large_err`]: https://rust-lang.github.io/rust-clippy/master/index.html#result_large_err
@ -5241,6 +5331,7 @@ Released 2018-09-13
[`short_circuit_statement`]: https://rust-lang.github.io/rust-clippy/master/index.html#short_circuit_statement [`short_circuit_statement`]: https://rust-lang.github.io/rust-clippy/master/index.html#short_circuit_statement
[`should_assert_eq`]: https://rust-lang.github.io/rust-clippy/master/index.html#should_assert_eq [`should_assert_eq`]: https://rust-lang.github.io/rust-clippy/master/index.html#should_assert_eq
[`should_implement_trait`]: https://rust-lang.github.io/rust-clippy/master/index.html#should_implement_trait [`should_implement_trait`]: https://rust-lang.github.io/rust-clippy/master/index.html#should_implement_trait
[`should_panic_without_expect`]: https://rust-lang.github.io/rust-clippy/master/index.html#should_panic_without_expect
[`significant_drop_in_scrutinee`]: https://rust-lang.github.io/rust-clippy/master/index.html#significant_drop_in_scrutinee [`significant_drop_in_scrutinee`]: https://rust-lang.github.io/rust-clippy/master/index.html#significant_drop_in_scrutinee
[`significant_drop_tightening`]: https://rust-lang.github.io/rust-clippy/master/index.html#significant_drop_tightening [`significant_drop_tightening`]: https://rust-lang.github.io/rust-clippy/master/index.html#significant_drop_tightening
[`similar_names`]: https://rust-lang.github.io/rust-clippy/master/index.html#similar_names [`similar_names`]: https://rust-lang.github.io/rust-clippy/master/index.html#similar_names

View file

@ -148,7 +148,7 @@ pub mod else_if_without_else;
pub fn register_plugins(store: &mut rustc_lint::LintStore, sess: &Session, conf: &Conf) { pub fn register_plugins(store: &mut rustc_lint::LintStore, sess: &Session, conf: &Conf) {
// ... // ...
store.register_early_pass(|| box else_if_without_else::ElseIfWithoutElse); store.register_early_pass(|| Box::new(else_if_without_else::ElseIfWithoutElse));
// ... // ...
store.register_group(true, "clippy::restriction", Some("clippy_restriction"), vec![ store.register_group(true, "clippy::restriction", Some("clippy_restriction"), vec![

View file

@ -1,6 +1,6 @@
[package] [package]
name = "clippy" name = "clippy"
version = "0.1.73" version = "0.1.74"
description = "A bunch of helpful lints to avoid common pitfalls in Rust" description = "A bunch of helpful lints to avoid common pitfalls in Rust"
repository = "https://github.com/rust-lang/rust-clippy" repository = "https://github.com/rust-lang/rust-clippy"
readme = "README.md" readme = "README.md"
@ -27,7 +27,7 @@ tempfile = { version = "3.2", optional = true }
termize = "0.1" termize = "0.1"
[dev-dependencies] [dev-dependencies]
ui_test = "0.11.5" ui_test = "0.18.1"
tester = "0.9" tester = "0.9"
regex = "1.5" regex = "1.5"
toml = "0.7.3" toml = "0.7.3"

View file

@ -13,8 +13,10 @@
- [Development](development/README.md) - [Development](development/README.md)
- [Basics](development/basics.md) - [Basics](development/basics.md)
- [Adding Lints](development/adding_lints.md) - [Adding Lints](development/adding_lints.md)
- [Defining Lints](development/defining_lints.md)
- [Lint Passes](development/lint_passes.md) - [Lint Passes](development/lint_passes.md)
- [Type Checking](development/type_checking.md) - [Type Checking](development/type_checking.md)
- [Method Checking](development/method_checking.md)
- [Macro Expansions](development/macro_expansions.md) - [Macro Expansions](development/macro_expansions.md)
- [Common Tools](development/common_tools_writing_lints.md) - [Common Tools](development/common_tools_writing_lints.md)
- [Infrastructure](development/infrastructure/README.md) - [Infrastructure](development/infrastructure/README.md)

View file

@ -161,8 +161,8 @@ The process of generating the `.stderr` file is the same, and prepending the
## Rustfix tests ## Rustfix tests
If the lint you are working on is making use of structured suggestions, the test If the lint you are working on is making use of structured suggestions, the test
file should include a `//@run-rustfix` comment at the top. This will will create a `.fixed` file by running [rustfix] for that test.
additionally run [rustfix] for that test. Rustfix will apply the suggestions Rustfix will apply the suggestions
from the lint to the code of the test file and compare that to the contents of a from the lint to the code of the test file and compare that to the contents of a
`.fixed` file. `.fixed` file.

View file

@ -0,0 +1,205 @@
# Define New Lints
The first step in the journey of a new lint is the definition
and registration of the lint in Clippy's codebase.
We can use the Clippy dev tools to handle this step since setting up the
lint involves some boilerplate code.
#### Lint types
A lint type is the category of items and expressions that your lint focuses on.
As of this writing, there are 12 _types_ of lints
besides the numerous standalone lints living under `clippy_lints/src/`:
- `cargo`
- `casts`
- `functions`
- `loops`
- `matches`
- `methods`
- `misc_early`
- `operators`
- `transmute`
- `types`
- `unit_types`
- `utils / internal` (Clippy internal lints)
These types group together lints that share some common behaviors. For instance,
`functions` groups together lints that deal with some aspects of functions in
Rust, like definitions, signatures and attributes.
For more information, feel free to compare the lint files under any category
with [All Clippy lints][all_lints] or ask one of the maintainers.
## Lint name
A good lint name is important; make sure to check the [lint naming
guidelines][lint_naming]. Don't worry if the lint name doesn't quite fit: a Clippy
team member will alert you in the PR process.
---
For our example, we'll write a lint that detects functions named `foo` and call
it `foo_functions`. Check the [lint naming guidelines][lint_naming] to see why
this name makes sense.
## Add and Register the Lint
Now that a name is chosen, we shall register `foo_functions` as a lint in the
codebase. There are two ways to register a lint.
### Standalone
If you believe that this new lint is a standalone lint (that doesn't belong to
any specific [type](#lint-types) like `functions` or `loops`), you can run the
following command in your Clippy project:
```sh
$ cargo dev new_lint --name=foo_functions --pass=late --category=pedantic
```
There are two things to note here:
1. `--pass`: We set `--pass=late` in this command to do a late lint pass. The
alternative is an `early` lint pass. We will discuss this difference in a
later chapter.
<!-- FIXME: Link that "later chapter" when lint_passes.md is merged -->
2. `--category`: If not provided, the `category` of this new lint will default
to `nursery`.
The `cargo dev new_lint` command will create a new file,
`clippy_lints/src/foo_functions.rs`, and will also [register the
lint](#lint-registration).
Overall, you should notice that the following files are modified or created:
```sh
$ git status
On branch foo_functions
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git restore <file>..." to discard changes in working directory)
modified: CHANGELOG.md
modified: clippy_lints/src/lib.register_lints.rs
modified: clippy_lints/src/lib.register_pedantic.rs
modified: clippy_lints/src/lib.rs
Untracked files:
(use "git add <file>..." to include in what will be committed)
clippy_lints/src/foo_functions.rs
tests/ui/foo_functions.rs
```
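To make the boilerplate concrete, here is a rough, hand-written sketch of what the
generated `clippy_lints/src/foo_functions.rs` might look like for a late-pass lint.
The exact template that `cargo dev new_lint` emits can differ, and the doc comments
and version number here are illustrative only:

```rust
// Hand-written approximation of the skeleton for a standalone late-pass lint;
// the real template may differ. `declare_clippy_lint!` is provided by the
// `clippy_lints` crate itself.
use rustc_lint::LateLintPass;
use rustc_session::declare_lint_pass;

declare_clippy_lint! {
    /// ### What it does
    /// Checks for functions named `foo`.
    ///
    /// ### Why is this bad?
    /// `foo` is a placeholder name that tells the reader nothing.
    #[clippy::version = "1.73.0"]
    pub FOO_FUNCTIONS,
    pedantic,
    "function named `foo`, which is not a descriptive name"
}

declare_lint_pass!(FooFunctions => [FOO_FUNCTIONS]);

impl<'tcx> LateLintPass<'tcx> for FooFunctions {
    // The generated skeleton leaves the `check_*` methods for you to fill in;
    // a lint about function definitions would typically implement `check_fn`.
}
```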
### Specific Type
> **Note**: Lint types are listed in the ["Lint types"](#lint-types) section
If you believe that this new lint belongs to a specific type of lints,
you can run `cargo dev new_lint` with a `--type` option.
Since our `foo_functions` lint is related to function calls, one could argue
that it belongs with other lints that detect certain behaviors of functions,
so we can put it in the `functions` group.
Let's run the following command in your Clippy project:
```sh
$ cargo dev new_lint --name=foo_functions --type=functions --category=pedantic
```
This command will create, among other things, a new file:
`clippy_lints/src/{type}/foo_functions.rs`.
In our case, the path will be `clippy_lints/src/functions/foo_functions.rs`.
Notice how this command has a `--type` flag instead of `--pass`. Unlike a standalone
definition, this lint won't be registered in the traditional sense. Instead, you will
call your lint from within the type's lint pass, found in `clippy_lints/src/{type}/mod.rs`.
A _type_ is just the name of a directory in `clippy_lints/src`, like `functions` in
the example command. Clippy groups together some lints that share common behaviors,
so if your lint falls into one, it would be best to add it to that type.
Overall, you should notice that the following files are modified or created:
```sh
$ git status
On branch foo_functions
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git restore <file>..." to discard changes in working directory)
modified: CHANGELOG.md
modified: clippy_lints/src/declared_lints.rs
modified: clippy_lints/src/functions/mod.rs
Untracked files:
(use "git add <file>..." to include in what will be committed)
clippy_lints/src/functions/foo_functions.rs
tests/ui/foo_functions.rs
```
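To give a feel for what "calling your lint from within the type's lint pass" means,
below is a heavily hedged sketch of the wiring in `clippy_lints/src/functions/mod.rs`.
The `foo_functions::check` entry point is hypothetical, the real file declares many
more lints (and may declare the lint constant in `mod.rs` itself), and the exact
`check_fn` signature tracks rustc and changes over time:

```rust
// Sketch only: the real `clippy_lints/src/functions/mod.rs` carries configuration
// and many more lints; names below that are not in the diff are hypothetical.
mod foo_functions;

use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::impl_lint_pass;
use rustc_span::def_id::LocalDefId;
use rustc_span::Span;

pub struct Functions;

impl_lint_pass!(Functions => [
    // ...the other `functions` lints are listed here as well...
    foo_functions::FOO_FUNCTIONS,
]);

impl<'tcx> LateLintPass<'tcx> for Functions {
    fn check_fn(
        &mut self,
        cx: &LateContext<'tcx>,
        kind: hir::intravisit::FnKind<'tcx>,
        decl: &'tcx hir::FnDecl<'tcx>,
        body: &'tcx hir::Body<'tcx>,
        span: Span,
        def_id: LocalDefId,
    ) {
        // The shared pass simply forwards to each lint's `check` function;
        // `foo_functions::check` is a hypothetical entry point for our lint.
        foo_functions::check(cx, kind, decl, body, span, def_id);
    }
}
```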
## The `declare_clippy_lint!` macro
After `cargo dev new_lint`, you should see an invocation of the
`declare_clippy_lint!` macro. It will be in the same file if you defined a standalone
lint, and it will be in `mod.rs` if you defined a type-specific lint.
The macro looks something like this:
```rust
declare_clippy_lint! {
/// ### What it does
///
/// // Describe here what the lint does.
///
/// Triggers when it detects...
///
/// ### Why is this bad?
///
/// // Describe why this pattern would be bad
///
/// It can lead to...
///
/// ### Example
/// ```rust
/// // example code where clippy issues a warning
/// ```
/// Use instead:
/// ```rust
/// // example code which does not raise clippy warning
/// ```
#[clippy::version = "1.70.0"] // <- In which version was this implemented, keep it up to date!
pub LINT_NAME, // <- The lint name IN_ALL_CAPS
pedantic, // <- The lint group
"default lint description" // <- A lint description, e.g. "A function has an unit return type."
}
```
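The `pub LINT_NAME` item declared by the macro becomes a `&'static Lint` constant
that is later passed to Clippy's diagnostic helpers. Continuing the hypothetical
`FOO_FUNCTIONS` example (and assuming the macro and `declare_lint_pass!`
invocations from above are in scope), a minimal sketch of emitting the lint could
look like this:

```rust
use clippy_utils::diagnostics::span_lint;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LateLintPass};

impl<'tcx> LateLintPass<'tcx> for FooFunctions {
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) {
        // Ignore code that comes from macro expansions, a common first step.
        if expr.span.from_expansion() {
            return;
        }
        // Purely illustrative trigger: flag any call to a method named `foo`.
        if let ExprKind::MethodCall(path, ..) = &expr.kind
            && path.ident.name.as_str() == "foo"
        {
            span_lint(cx, FOO_FUNCTIONS, expr.span, "calling a method named `foo`");
        }
    }
}
```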
## Lint registration
If we run the `cargo dev new_lint` command for a new lint, the lint will be
automatically registered and there is nothing more to do.
However, sometimes we might want to declare a new lint by hand. In this case,
we'd use the `cargo dev update_lints` command afterwards.
When a lint is manually declared, we might need to register the lint pass
manually in the `register_plugins` function in `clippy_lints/src/lib.rs`:
```rust
store.register_late_pass(|_| Box::new(foo_functions::FooFunctions));
```
As you might have guessed, where there's something late, there is something
early: in Clippy there is a `register_early_pass` method as well. More on early
vs. late passes in a later chapter.
<!-- FIXME: Link that "later chapter" when lint_passes.md is merged -->
Without a call to one of `register_early_pass` or `register_late_pass`, the lint
pass in question will not be run.
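For comparison, here are both registration calls side by side; the early-pass line
mirrors the `else_if_without_else` registration shown earlier in this diff, and the
`foo_functions` module is the hypothetical lint from above:

```rust
// A late pass has access to type information through `LateContext`:
store.register_late_pass(|_| Box::new(foo_functions::FooFunctions));

// An early pass runs on the AST before type checking; note that its closure
// takes no argument. A given pass implements either `EarlyLintPass` or
// `LateLintPass`, not both, so only one of these calls applies to a real lint.
store.register_early_pass(|| Box::new(else_if_without_else::ElseIfWithoutElse));
```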
[all_lints]: https://rust-lang.github.io/rust-clippy/master/
[lint_naming]: https://rust-lang.github.io/rfcs/0344-conventions-galore.html#lints

View file

@ -0,0 +1,93 @@
# Method Checking
In some scenarios we might want to check for methods when developing
a lint. There are two kinds of questions that we might be curious about:
- Invocation: Does an expression call a specific method?
- Definition: Does an `impl` define a method?
## Checking if an `expr` is calling a specific method
Suppose we have an `expr`; we can check whether it calls a specific
method, e.g. `our_fancy_method`, by pattern matching on
the [`ExprKind`] that we can access from `expr.kind`:
```rust
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_span::sym;
use clippy_utils::is_trait_method;
impl<'tcx> LateLintPass<'tcx> for OurFancyMethodLint {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'_>) {
// Check our expr is calling a method with pattern matching
if let hir::ExprKind::MethodCall(path, _, [self_arg, ..]) = &expr.kind
// Check if the name of this method is `our_fancy_method`
&& path.ident.name == sym!(our_fancy_method)
// We can check the type of the self argument whenever necessary.
// (It's necessary if we want to check that the method belongs to a specific trait;
// for example, a `map` method could belong to a user-defined trait instead of to `Iterator`.)
// See the next section for more information.
&& is_trait_method(cx, self_arg, sym::OurFancyTrait)
{
println!("`expr` is a method call for `our_fancy_method`");
}
}
}
```
Take a closer look at the `ExprKind` enum variant [`MethodCall`] for more
information on the pattern matching. As mentioned in [Define
Lints](defining_lints.md#lint-types), the `methods` lint type is full of pattern
matching with `MethodCall` in case the reader wishes to explore more.
Additionally, we use the [`clippy_utils::sym!`][sym] macro to conveniently
convert an input `our_fancy_method` into a `Symbol` and compare that symbol to
the [`Ident`]'s name in the [`PathSegment`] in the [`MethodCall`].
## Checking if an `impl` block implements a method
Sometimes we want to check whether a method is being called; other
times we want to know if our `Ty` defines a method.
To check if our `impl` block defines a method `our_fancy_method`, we will
utilize the [`check_impl_item`] method that is available in our beloved
[`LateLintPass`] (for more information, refer to the ["Lint
Passes"](lint_passes.md) chapter in the Clippy book). This method provides us
with an [`ImplItem`] struct, which represents anything within an `impl` block.
Let us take a look at how we might check for the implementation of
`our_fancy_method` on a type:
```rust
use clippy_utils::ty::is_type_diagnostic_item;
use clippy_utils::return_ty;
use rustc_hir::{ImplItem, ImplItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_span::symbol::sym;
impl<'tcx> LateLintPass<'tcx> for MyTypeImpl {
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) {
// Check if item is a method/function
if let ImplItemKind::Fn(ref signature, _) = impl_item.kind
// Check the method is named `our_fancy_method`
&& impl_item.ident.name == sym!(our_fancy_method)
// We can also check it has a parameter `self`
&& signature.decl.implicit_self.has_implicit_self()
// We can go even further and check whether its return type is `String`
&& is_type_diagnostic_item(cx, return_ty(cx, impl_item.hir_id), sym::String)
{
println!("`our_fancy_method` is implemented!");
}
}
}
```
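Tying this back to [Defining Lints](defining_lints.md): a pass like `MyTypeImpl`
still has to be registered (or generated via `cargo dev new_lint`) before it runs.
A minimal, hypothetical registration mirrors the late-pass call from that chapter:

```rust
// Hypothetical wiring; `MyTypeImpl` is the illustrative pass defined above and
// would live in its own module that `register_plugins` can reach.
store.register_late_pass(|_| Box::new(MyTypeImpl));
```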
[`check_impl_item`]: https://doc.rust-lang.org/stable/nightly-rustc/rustc_lint/trait.LateLintPass.html#method.check_impl_item
[`ExprKind`]: https://doc.rust-lang.org/beta/nightly-rustc/rustc_hir/hir/enum.ExprKind.html
[`Ident`]: https://doc.rust-lang.org/beta/nightly-rustc/rustc_span/symbol/struct.Ident.html
[`ImplItem`]: https://doc.rust-lang.org/stable/nightly-rustc/rustc_hir/hir/struct.ImplItem.html
[`LateLintPass`]: https://doc.rust-lang.org/stable/nightly-rustc/rustc_lint/trait.LateLintPass.html
[`MethodCall`]: https://doc.rust-lang.org/beta/nightly-rustc/rustc_hir/hir/enum.ExprKind.html#variant.MethodCall
[`PathSegment`]: https://doc.rust-lang.org/beta/nightly-rustc/rustc_hir/hir/struct.PathSegment.html
[sym]: https://doc.rust-lang.org/stable/nightly-rustc/clippy_utils/macro.sym.html

View file

@ -9,16 +9,12 @@ accessed by the `SPEEDTEST` (and `SPEEDTEST_*`) environment variables.
To do a simple speed test of a lint (e.g. `allow_attributes`), use this command. To do a simple speed test of a lint (e.g. `allow_attributes`), use this command.
```sh ```sh
$ SPEEDTEST=ui TESTNAME="allow_attributes" cargo uitest -- --nocapture $ SPEEDTEST=ui TESTNAME="allow_attributes" cargo uitest
``` ```
This will test all `ui` tests (`SPEEDTEST=ui`) whose names start with `allow_attributes`. By default, `SPEEDTEST` will This will test all `ui` tests (`SPEEDTEST=ui`) whose names start with `allow_attributes`. By default, `SPEEDTEST` will
iterate your test 1000 times. But you can change this with `SPEEDTEST_ITERATIONS`. iterate your test 1000 times. But you can change this with `SPEEDTEST_ITERATIONS`.
```sh ```sh
$ SPEEDTEST=toml SPEEDTEST_ITERATIONS=100 TESTNAME="semicolon_block" cargo uitest -- --nocapture $ SPEEDTEST=toml SPEEDTEST_ITERATIONS=100 TESTNAME="semicolon_block" cargo uitest
``` ```
> **WARNING**: Be sure to use `-- --nocapture` at the end of the command to see the average test time. If you don't
> use `-- --nocapture` (e.g. `SPEEDTEST=ui` `TESTNAME="let_underscore_untyped" cargo uitest -- --nocapture`), this
> will not show up.

View file

@ -690,7 +690,6 @@ fn gen_deprecated_lints_test(lints: &[DeprecatedLint]) -> String {
fn gen_renamed_lints_test(lints: &[RenamedLint]) -> String { fn gen_renamed_lints_test(lints: &[RenamedLint]) -> String {
let mut seen_lints = HashSet::new(); let mut seen_lints = HashSet::new();
let mut res: String = GENERATED_FILE_COMMENT.into(); let mut res: String = GENERATED_FILE_COMMENT.into();
res.push_str("//@run-rustfix\n\n");
for lint in lints { for lint in lints {
if seen_lints.insert(&lint.new_name) { if seen_lints.insert(&lint.new_name) {
writeln!(res, "#![allow({})]", lint.new_name).unwrap(); writeln!(res, "#![allow({})]", lint.new_name).unwrap();

View file

@ -1,6 +1,6 @@
[package] [package]
name = "clippy_lints" name = "clippy_lints"
version = "0.1.73" version = "0.1.74"
description = "A bunch of helpful lints to avoid common pitfalls in Rust" description = "A bunch of helpful lints to avoid common pitfalls in Rust"
repository = "https://github.com/rust-lang/rust-clippy" repository = "https://github.com/rust-lang/rust-clippy"
readme = "README.md" readme = "README.md"

View file

@ -6,7 +6,11 @@ use clippy_utils::macros::{is_panic, macro_backtrace};
use clippy_utils::msrvs::{self, Msrv}; use clippy_utils::msrvs::{self, Msrv};
use clippy_utils::source::{first_line_of_span, is_present_in_source, snippet_opt, without_block_comments}; use clippy_utils::source::{first_line_of_span, is_present_in_source, snippet_opt, without_block_comments};
use if_chain::if_chain; use if_chain::if_chain;
use rustc_ast::{AttrKind, AttrStyle, Attribute, LitKind, MetaItemKind, MetaItemLit, NestedMetaItem}; use rustc_ast::token::{Token, TokenKind};
use rustc_ast::tokenstream::TokenTree;
use rustc_ast::{
AttrArgs, AttrArgsEq, AttrKind, AttrStyle, Attribute, LitKind, MetaItemKind, MetaItemLit, NestedMetaItem,
};
use rustc_errors::Applicability; use rustc_errors::Applicability;
use rustc_hir::{ use rustc_hir::{
Block, Expr, ExprKind, ImplItem, ImplItemKind, Item, ItemKind, StmtKind, TraitFn, TraitItem, TraitItemKind, Block, Expr, ExprKind, ImplItem, ImplItemKind, Item, ItemKind, StmtKind, TraitFn, TraitItem, TraitItemKind,
@ -339,6 +343,41 @@ declare_clippy_lint! {
"ensures that all `allow` and `expect` attributes have a reason" "ensures that all `allow` and `expect` attributes have a reason"
} }
declare_clippy_lint! {
/// ### What it does
/// Checks for `#[should_panic]` attributes without specifying the expected panic message.
///
/// ### Why is this bad?
/// The expected panic message should be specified to ensure that the test is actually
/// panicking with the expected message, and not another unrelated panic.
///
/// ### Example
/// ```rust
/// fn random() -> i32 { 0 }
///
/// #[should_panic]
/// #[test]
/// fn my_test() {
/// let _ = 1 / random();
/// }
/// ```
///
/// Use instead:
/// ```rust
/// fn random() -> i32 { 0 }
///
/// #[should_panic = "attempt to divide by zero"]
/// #[test]
/// fn my_test() {
/// let _ = 1 / random();
/// }
/// ```
#[clippy::version = "1.73.0"]
pub SHOULD_PANIC_WITHOUT_EXPECT,
pedantic,
"ensures that all `should_panic` attributes specify its expected panic message"
}
declare_clippy_lint! { declare_clippy_lint! {
/// ### What it does /// ### What it does
/// Checks for `any` and `all` combinators in `cfg` with only one condition. /// Checks for `any` and `all` combinators in `cfg` with only one condition.
@ -395,6 +434,7 @@ declare_lint_pass!(Attributes => [
DEPRECATED_SEMVER, DEPRECATED_SEMVER,
USELESS_ATTRIBUTE, USELESS_ATTRIBUTE,
BLANKET_CLIPPY_RESTRICTION_LINTS, BLANKET_CLIPPY_RESTRICTION_LINTS,
SHOULD_PANIC_WITHOUT_EXPECT,
]); ]);
impl<'tcx> LateLintPass<'tcx> for Attributes { impl<'tcx> LateLintPass<'tcx> for Attributes {
@ -442,6 +482,9 @@ impl<'tcx> LateLintPass<'tcx> for Attributes {
} }
} }
} }
if attr.has_name(sym::should_panic) {
check_should_panic_reason(cx, attr);
}
} }
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) {
@ -550,6 +593,35 @@ fn extract_clippy_lint(lint: &NestedMetaItem) -> Option<Symbol> {
None None
} }
fn check_should_panic_reason(cx: &LateContext<'_>, attr: &Attribute) {
if let AttrKind::Normal(normal_attr) = &attr.kind {
if let AttrArgs::Eq(_, AttrArgsEq::Hir(_)) = &normal_attr.item.args {
// `#[should_panic = ".."]` found, good
return;
}
if let AttrArgs::Delimited(args) = &normal_attr.item.args
&& let mut tt_iter = args.tokens.trees()
&& let Some(TokenTree::Token(Token { kind: TokenKind::Ident(sym::expected, _), .. }, _)) = tt_iter.next()
&& let Some(TokenTree::Token(Token { kind: TokenKind::Eq, .. }, _)) = tt_iter.next()
&& let Some(TokenTree::Token(Token { kind: TokenKind::Literal(_), .. }, _)) = tt_iter.next()
{
// `#[should_panic(expected = "..")]` found, good
return;
}
span_lint_and_sugg(
cx,
SHOULD_PANIC_WITHOUT_EXPECT,
attr.span,
"#[should_panic] attribute without a reason",
"consider specifying the expected panic",
r#"#[should_panic(expected = /* panic message */)]"#.into(),
Applicability::HasPlaceholders,
);
}
}
fn check_clippy_lint_names(cx: &LateContext<'_>, name: Symbol, items: &[NestedMetaItem]) { fn check_clippy_lint_names(cx: &LateContext<'_>, name: Symbol, items: &[NestedMetaItem]) {
for lint in items { for lint in items {
if let Some(lint_name) = extract_clippy_lint(lint) { if let Some(lint_name) = extract_clippy_lint(lint) {

View file

@ -5,6 +5,7 @@ use rustc_hir::{Expr, ExprKind, GenericArg};
use rustc_lint::LateContext; use rustc_lint::LateContext;
use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{self, Ty}; use rustc_middle::ty::{self, Ty};
use rustc_span::sym;
use super::CAST_PTR_ALIGNMENT; use super::CAST_PTR_ALIGNMENT;
@ -76,13 +77,14 @@ fn is_used_as_unaligned(cx: &LateContext<'_>, e: &Expr<'_>) -> bool {
ExprKind::Call(func, [arg, ..]) if arg.hir_id == e.hir_id => { ExprKind::Call(func, [arg, ..]) if arg.hir_id == e.hir_id => {
static PATHS: &[&[&str]] = &[ static PATHS: &[&[&str]] = &[
paths::PTR_READ_UNALIGNED.as_slice(), paths::PTR_READ_UNALIGNED.as_slice(),
paths::PTR_WRITE_UNALIGNED.as_slice(),
paths::PTR_UNALIGNED_VOLATILE_LOAD.as_slice(), paths::PTR_UNALIGNED_VOLATILE_LOAD.as_slice(),
paths::PTR_UNALIGNED_VOLATILE_STORE.as_slice(), paths::PTR_UNALIGNED_VOLATILE_STORE.as_slice(),
]; ];
if let ExprKind::Path(path) = &func.kind if let ExprKind::Path(path) = &func.kind
&& let Some(def_id) = cx.qpath_res(path, func.hir_id).opt_def_id() && let Some(def_id) = cx.qpath_res(path, func.hir_id).opt_def_id()
&& match_any_def_paths(cx, def_id, PATHS).is_some() && (match_any_def_paths(cx, def_id, PATHS).is_some()
|| cx.tcx.is_diagnostic_item(sym::ptr_write_unaligned, def_id))
{ {
true true
} else { } else {

Some files were not shown because too many files have changed in this diff