diff --git a/src/int/specialized_div_rem/mod.rs b/src/int/specialized_div_rem/mod.rs
index 760f5f5b7..458ffbf5f 100644
--- a/src/int/specialized_div_rem/mod.rs
+++ b/src/int/specialized_div_rem/mod.rs
@@ -75,7 +75,7 @@ fn zero_div_fn() -> ! {
     // Calling the intrinsic directly, to avoid the `assert_unsafe_precondition` that cannot be used
     // here because it involves non-`inline` functions
     // (https://github.com/rust-lang/compiler-builtins/issues/491).
-    unsafe { core::intrinsics::unreachable() }
+    unsafe { crate::intrinsics::unreachable() }
 }
 
 const USE_LZ: bool = {
diff --git a/src/lib.rs b/src/lib.rs
index 3e5491878..eaebaa50e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,7 +5,8 @@
 #![feature(cfg_target_has_atomic)]
 #![feature(compiler_builtins)]
 #![feature(core_ffi_c)]
-#![feature(core_intrinsics)]
+#![feature(intrinsics)]
+#![feature(rustc_attrs)]
 #![feature(inline_const)]
 #![feature(lang_items)]
 #![feature(linkage)]
@@ -80,3 +81,50 @@ pub mod x86;
 pub mod x86_64;
 
 pub mod probestack;
+
+// `core` is changing the feature name for the `intrinsics` module.
+// To permit that transition, we avoid using that feature for now.
+mod intrinsics {
+    extern "rust-intrinsic" {
+        #[rustc_nounwind]
+        pub fn atomic_load_unordered<T>(src: *const T) -> T;
+
+        #[rustc_nounwind]
+        pub fn atomic_store_unordered<T>(dst: *mut T, val: T);
+
+        /// Informs the optimizer that this point in the code is not reachable,
+        /// enabling further optimizations.
+        ///
+        /// N.B., this is very different from the `unreachable!()` macro: Unlike the
+        /// macro, which panics when it is executed, it is *undefined behavior* to
+        /// reach code marked with this function.
+        ///
+        /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
+        #[rustc_nounwind]
+        pub fn unreachable() -> !;
+
+        /// Performs an exact division, resulting in undefined behavior where
+        /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`
+        ///
+        /// This intrinsic does not have a stable counterpart.
+        #[rustc_nounwind]
+        pub fn exact_div<T>(x: T, y: T) -> T;
+
+        /// Performs an unchecked division, resulting in undefined behavior
+        /// where `y == 0` or `x == T::MIN && y == -1`
+        ///
+        /// Safe wrappers for this intrinsic are available on the integer
+        /// primitives via the `checked_div` method. For example,
+        /// [`u32::checked_div`]
+        #[rustc_nounwind]
+        pub fn unchecked_div<T>(x: T, y: T) -> T;
+        /// Returns the remainder of an unchecked division, resulting in
+        /// undefined behavior when `y == 0` or `x == T::MIN && y == -1`
+        ///
+        /// Safe wrappers for this intrinsic are available on the integer
+        /// primitives via the `checked_rem` method. For example,
+        /// [`u32::checked_rem`]
+        #[rustc_nounwind]
+        pub fn unchecked_rem<T>(x: T, y: T) -> T;
+    }
+}
diff --git a/src/mem/mod.rs b/src/mem/mod.rs
index ccf191779..3325381aa 100644
--- a/src/mem/mod.rs
+++ b/src/mem/mod.rs
@@ -8,10 +8,11 @@ type c_int = i16;
 #[cfg(not(target_pointer_width = "16"))]
 type c_int = i32;
 
-use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
 use core::mem;
 use core::ops::{BitOr, Shl};
 
+use crate::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
+
 // memcpy/memmove/memset have optimized implementations on some architectures
 #[cfg_attr(
     all(not(feature = "no-asm"), target_arch = "x86_64"),
diff --git a/src/mem/x86_64.rs b/src/mem/x86_64.rs
index 40b67093f..4c017a5ab 100644
--- a/src/mem/x86_64.rs
+++ b/src/mem/x86_64.rs
@@ -17,9 +17,10 @@
 // Note that ERMSB does not enhance the backwards (DF=1) "rep movsb".
 
 use core::arch::asm;
-use core::intrinsics;
 use core::mem;
 
+use crate::intrinsics;
+
 #[inline(always)]
 #[cfg(target_feature = "ermsb")]
 pub unsafe fn copy_forward(dest: *mut u8, src: *const u8, count: usize) {
diff --git a/src/x86_64.rs b/src/x86_64.rs
index 7ad941158..bc475029e 100644
--- a/src/x86_64.rs
+++ b/src/x86_64.rs
@@ -1,6 +1,6 @@
 #![allow(unused_imports)]
 
-use core::intrinsics;
+use crate::intrinsics;
 
 // NOTE These functions are implemented using assembly because they using a custom
 // calling convention which can't be implemented using a normal Rust function
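
Note (not part of the patch): the doc comment added above points out that the stable counterpart of the `unreachable` intrinsic is `core::hint::unreachable_unchecked`. compiler-builtins itself cannot use it in this no-core setting, but ordinary crates get the same optimization hint on stable Rust. A minimal sketch, where `div_nonzero` is a hypothetical helper introduced only for illustration:

    // Stable-Rust sketch of the hint the `unreachable` intrinsic provides.
    use core::hint::unreachable_unchecked;

    /// Divides `x` by `y`, assuming the caller has ruled out `y == 0`.
    ///
    /// # Safety
    /// Calling this with `y == 0` is undefined behavior.
    unsafe fn div_nonzero(x: u32, y: u32) -> u32 {
        if y == 0 {
            // Promises the optimizer this branch is never taken, so the
            // zero check and the division-by-zero panic path can be removed.
            unsafe { unreachable_unchecked() }
        }
        x / y
    }

    fn main() {
        // SAFETY: the divisor is a nonzero constant.
        assert_eq!(unsafe { div_nonzero(10, 2) }, 5);
    }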