// compiler/rustc_abi/src/lib.rs
// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those `rustc_abi` also covers type layout, as it must for values passed on the stack.
Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.
You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};

mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
#[cfg(feature = "nightly")]
pub use extern_abi::CVariadicStatus;
pub use extern_abi::{ExternAbi, all_names};
pub use layout::{FIRST_VARIANT, FieldIdx, LayoutCalculator, LayoutCalculatorError, VariantIdx};
#[cfg(feature = "nightly")]
pub use layout::{Layout, TyAbiInterface, TyAndLayout};

70#[derive(#[automatically_derived]
impl ::core::clone::Clone for ReprFlags {
    #[inline]
    fn clone(&self) -> ReprFlags {
        let _: ::core::clone::AssertParamIsClone<u8>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for ReprFlags { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for ReprFlags {
    #[inline]
    fn eq(&self, other: &ReprFlags) -> bool { self.0 == other.0 }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for ReprFlags {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u8>;
    }
}Eq, #[automatically_derived]
impl ::core::default::Default for ReprFlags {
    #[inline]
    fn default() -> ReprFlags {
        ReprFlags(::core::default::Default::default())
    }
}Default)]
71#[cfg_attr(
72    feature = "nightly",
73    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for ReprFlags {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    ReprFlags(ref __binding_0) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for ReprFlags {
            fn decode(__decoder: &mut __D) -> Self {
                ReprFlags(::rustc_serialize::Decodable::decode(__decoder))
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ReprFlags where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ReprFlags(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
74)]
75pub struct ReprFlags(u8);
76
77impl ReprFlags {
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_C: Self = Self::from_bits_retain(1 << 0);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_SIMD: Self = Self::from_bits_retain(1 << 1);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_TRANSPARENT: Self = Self::from_bits_retain(1 << 2);
    #[doc = r" Internal only for now. If true, don't reorder fields."]
    #[doc = r" On its own it does not prevent ABI optimizations."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_LINEAR: Self = Self::from_bits_retain(1 << 3);
    #[doc =
    r" If true, the type's crate has opted into layout randomization."]
    #[doc =
    r" Other flags can still inhibit reordering and thus randomization."]
    #[doc = r" The seed stored in `ReprOptions.field_shuffle_seed`."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const RANDOMIZE_LAYOUT: Self = Self::from_bits_retain(1 << 4);
    #[doc =
    r" If true, the type is always passed indirectly by non-Rustic ABIs."]
    #[doc =
    r" See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details."]
    #[allow(deprecated, non_upper_case_globals,)]
    pub const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS: Self =
        Self::from_bits_retain(1 << 5);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const IS_SCALABLE: Self = Self::from_bits_retain(1 << 6);
    #[allow(deprecated, non_upper_case_globals,)]
    pub const FIELD_ORDER_UNOPTIMIZABLE: Self =
        Self::from_bits_retain(ReprFlags::IS_C.bits() |
                        ReprFlags::IS_SIMD.bits() | ReprFlags::IS_SCALABLE.bits() |
                ReprFlags::IS_LINEAR.bits());
    #[allow(deprecated, non_upper_case_globals,)]
    pub const ABI_UNOPTIMIZABLE: Self =
        Self::from_bits_retain(ReprFlags::IS_C.bits() |
                ReprFlags::IS_SIMD.bits());
}
impl ::bitflags::Flags for ReprFlags {
    const FLAGS: &'static [::bitflags::Flag<ReprFlags>] =
        &[{

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_C", ReprFlags::IS_C)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_SIMD", ReprFlags::IS_SIMD)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_TRANSPARENT",
                            ReprFlags::IS_TRANSPARENT)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_LINEAR", ReprFlags::IS_LINEAR)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("RANDOMIZE_LAYOUT",
                            ReprFlags::RANDOMIZE_LAYOUT)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS",
                            ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("IS_SCALABLE", ReprFlags::IS_SCALABLE)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("FIELD_ORDER_UNOPTIMIZABLE",
                            ReprFlags::FIELD_ORDER_UNOPTIMIZABLE)
                    },
                    {

                        #[allow(deprecated, non_upper_case_globals,)]
                        ::bitflags::Flag::new("ABI_UNOPTIMIZABLE",
                            ReprFlags::ABI_UNOPTIMIZABLE)
                    }];
    type Bits = u8;
    fn bits(&self) -> u8 { ReprFlags::bits(self) }
    fn from_bits_retain(bits: u8) -> ReprFlags {
        ReprFlags::from_bits_retain(bits)
    }
}
#[allow(dead_code, deprecated, unused_doc_comments, unused_attributes,
unused_mut, unused_imports, non_upper_case_globals, clippy ::
assign_op_pattern, clippy :: iter_without_into_iter,)]
const _: () =
    {
        #[allow(dead_code, deprecated, unused_attributes)]
        impl ReprFlags {
            /// Get a flags value with all bits unset.
            #[inline]
            pub const fn empty() -> Self {
                Self(<u8 as ::bitflags::Bits>::EMPTY)
            }
            /// Get a flags value with all known bits set.
            #[inline]
            pub const fn all() -> Self {
                let mut truncated = <u8 as ::bitflags::Bits>::EMPTY;
                let mut i = 0;
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                {
                    {
                        let flag =
                            <ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
                        truncated = truncated | flag;
                        i += 1;
                    }
                };
                let _ = i;
                Self(truncated)
            }
            /// Get the underlying bits value.
            ///
            /// The returned value is exactly the bits set in this flags value.
            #[inline]
            pub const fn bits(&self) -> u8 { self.0 }
            /// Convert from a bits value.
            ///
            /// This method will return `None` if any unknown bits are set.
            #[inline]
            pub const fn from_bits(bits: u8)
                -> ::bitflags::__private::core::option::Option<Self> {
                let truncated = Self::from_bits_truncate(bits).0;
                if truncated == bits {
                    ::bitflags::__private::core::option::Option::Some(Self(bits))
                } else { ::bitflags::__private::core::option::Option::None }
            }
            /// Convert from a bits value, unsetting any unknown bits.
            #[inline]
            pub const fn from_bits_truncate(bits: u8) -> Self {
                Self(bits & Self::all().0)
            }
            /// Convert from a bits value exactly.
            #[inline]
            pub const fn from_bits_retain(bits: u8) -> Self { Self(bits) }
            /// Get a flags value with the bits of a flag with the given name set.
            ///
            /// This method will return `None` if `name` is empty or doesn't
            /// correspond to any named flag.
            #[inline]
            pub fn from_name(name: &str)
                -> ::bitflags::__private::core::option::Option<Self> {
                {
                    if name == "IS_C" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_C.bits()));
                    }
                };
                ;
                {
                    if name == "IS_SIMD" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SIMD.bits()));
                    }
                };
                ;
                {
                    if name == "IS_TRANSPARENT" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_TRANSPARENT.bits()));
                    }
                };
                ;
                {
                    if name == "IS_LINEAR" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_LINEAR.bits()));
                    }
                };
                ;
                {
                    if name == "RANDOMIZE_LAYOUT" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::RANDOMIZE_LAYOUT.bits()));
                    }
                };
                ;
                {
                    if name == "PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS.bits()));
                    }
                };
                ;
                {
                    if name == "IS_SCALABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SCALABLE.bits()));
                    }
                };
                ;
                {
                    if name == "FIELD_ORDER_UNOPTIMIZABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE.bits()));
                    }
                };
                ;
                {
                    if name == "ABI_UNOPTIMIZABLE" {
                        return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::ABI_UNOPTIMIZABLE.bits()));
                    }
                };
                ;
                let _ = name;
                ::bitflags::__private::core::option::Option::None
            }
            /// Whether all bits in this flags value are unset.
            #[inline]
            pub const fn is_empty(&self) -> bool {
                self.0 == <u8 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all known bits in this flags value are set.
            #[inline]
            pub const fn is_all(&self) -> bool {
                Self::all().0 | self.0 == self.0
            }
            /// Whether any set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn intersects(&self, other: Self) -> bool {
                self.0 & other.0 != <u8 as ::bitflags::Bits>::EMPTY
            }
            /// Whether all set bits in a source flags value are also set in a target flags value.
            #[inline]
            pub const fn contains(&self, other: Self) -> bool {
                self.0 & other.0 == other.0
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            pub fn insert(&mut self, other: Self) {
                *self = Self(self.0).union(other);
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `remove` won't truncate `other`, but the `!` operator will.
            #[inline]
            pub fn remove(&mut self, other: Self) {
                *self = Self(self.0).difference(other);
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            pub fn toggle(&mut self, other: Self) {
                *self = Self(self.0).symmetric_difference(other);
            }
            /// Call `insert` when `value` is `true` or `remove` when `value` is `false`.
            #[inline]
            pub fn set(&mut self, other: Self, value: bool) {
                if value { self.insert(other); } else { self.remove(other); }
            }
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn intersection(self, other: Self) -> Self {
                Self(self.0 & other.0)
            }
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn union(self, other: Self) -> Self {
                Self(self.0 | other.0)
            }
            /// The intersection of a source flags value with the complement of a target flags
            /// value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            #[must_use]
            pub const fn difference(self, other: Self) -> Self {
                Self(self.0 & !other.0)
            }
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            #[must_use]
            pub const fn symmetric_difference(self, other: Self) -> Self {
                Self(self.0 ^ other.0)
            }
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            #[must_use]
            pub const fn complement(self) -> Self {
                Self::from_bits_truncate(!self.0)
            }
        }
        impl ::bitflags::__private::core::fmt::Binary for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Binary::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::Octal for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::Octal::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::LowerHex for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::LowerHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::fmt::UpperHex for ReprFlags {
            fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
                -> ::bitflags::__private::core::fmt::Result {
                let inner = self.0;
                ::bitflags::__private::core::fmt::UpperHex::fmt(&inner, f)
            }
        }
        impl ::bitflags::__private::core::ops::BitOr for ReprFlags {
            type Output = Self;
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor(self, other: ReprFlags) -> Self { self.union(other) }
        }
        impl ::bitflags::__private::core::ops::BitOrAssign for ReprFlags {
            /// The bitwise or (`|`) of the bits in two flags values.
            #[inline]
            fn bitor_assign(&mut self, other: Self) { self.insert(other); }
        }
        impl ::bitflags::__private::core::ops::BitXor for ReprFlags {
            type Output = Self;
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor(self, other: Self) -> Self {
                self.symmetric_difference(other)
            }
        }
        impl ::bitflags::__private::core::ops::BitXorAssign for ReprFlags {
            /// The bitwise exclusive-or (`^`) of the bits in two flags values.
            #[inline]
            fn bitxor_assign(&mut self, other: Self) { self.toggle(other); }
        }
        impl ::bitflags::__private::core::ops::BitAnd for ReprFlags {
            type Output = Self;
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand(self, other: Self) -> Self { self.intersection(other) }
        }
        impl ::bitflags::__private::core::ops::BitAndAssign for ReprFlags {
            /// The bitwise and (`&`) of the bits in two flags values.
            #[inline]
            fn bitand_assign(&mut self, other: Self) {
                *self =
                    Self::from_bits_retain(self.bits()).intersection(other);
            }
        }
        impl ::bitflags::__private::core::ops::Sub for ReprFlags {
            type Output = Self;
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub(self, other: Self) -> Self { self.difference(other) }
        }
        impl ::bitflags::__private::core::ops::SubAssign for ReprFlags {
            /// The intersection of a source flags value with the complement of a target flags value (`&!`).
            ///
            /// This method is not equivalent to `self & !other` when `other` has unknown bits set.
            /// `difference` won't truncate `other`, but the `!` operator will.
            #[inline]
            fn sub_assign(&mut self, other: Self) { self.remove(other); }
        }
        impl ::bitflags::__private::core::ops::Not for ReprFlags {
            type Output = Self;
            /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
            #[inline]
            fn not(self) -> Self { self.complement() }
        }
        impl ::bitflags::__private::core::iter::Extend<ReprFlags> for
            ReprFlags {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn extend<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(&mut self, iterator: T) {
                for item in iterator { self.insert(item) }
            }
        }
        impl ::bitflags::__private::core::iter::FromIterator<ReprFlags> for
            ReprFlags {
            /// The bitwise or (`|`) of the bits in each flags value.
            fn from_iter<T: ::bitflags::__private::core::iter::IntoIterator<Item
                = Self>>(iterator: T) -> Self {
                use ::bitflags::__private::core::iter::Extend;
                let mut result = Self::empty();
                result.extend(iterator);
                result
            }
        }
        impl ReprFlags {
            /// Yield a set of contained flags values.
            ///
            /// Each yielded flags value will correspond to a defined named flag. Any unknown bits
            /// will be yielded together as a final flags value.
            #[inline]
            pub const fn iter(&self) -> ::bitflags::iter::Iter<ReprFlags> {
                ::bitflags::iter::Iter::__private_const_new(<ReprFlags as
                        ::bitflags::Flags>::FLAGS,
                    ReprFlags::from_bits_retain(self.bits()),
                    ReprFlags::from_bits_retain(self.bits()))
            }
            /// Yield a set of contained named flags values.
            ///
            /// This method is like [`iter`](#method.iter), except only yields bits in contained named flags.
            /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded.
            #[inline]
            pub const fn iter_names(&self)
                -> ::bitflags::iter::IterNames<ReprFlags> {
                ::bitflags::iter::IterNames::__private_const_new(<ReprFlags as
                        ::bitflags::Flags>::FLAGS,
                    ReprFlags::from_bits_retain(self.bits()),
                    ReprFlags::from_bits_retain(self.bits()))
            }
        }
        impl ::bitflags::__private::core::iter::IntoIterator for ReprFlags {
            type Item = ReprFlags;
            type IntoIter = ::bitflags::iter::Iter<ReprFlags>;
            fn into_iter(self) -> Self::IntoIter { self.iter() }
        }
    };bitflags! {
78    impl ReprFlags: u8 {
79        const IS_C               = 1 << 0;
80        const IS_SIMD            = 1 << 1;
81        const IS_TRANSPARENT     = 1 << 2;
82        /// Internal only for now. If true, don't reorder fields.
83        /// On its own it does not prevent ABI optimizations.
84        const IS_LINEAR          = 1 << 3;
85        /// If true, the type's crate has opted into layout randomization.
86        /// Other flags can still inhibit reordering and thus randomization.
87        /// The seed stored in `ReprOptions.field_shuffle_seed`.
88        const RANDOMIZE_LAYOUT   = 1 << 4;
89        /// If true, the type is always passed indirectly by non-Rustic ABIs.
90        /// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
91        const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;
92        const IS_SCALABLE        = 1 << 6;
93         // Any of these flags being set prevent field reordering optimisation.
94        const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()
95                                 | ReprFlags::IS_SIMD.bits()
96                                 | ReprFlags::IS_SCALABLE.bits()
97                                 | ReprFlags::IS_LINEAR.bits();
98        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
99    }
100}
101
102// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
103// `rustc_data_structures` to make it build on stable.
104impl std::fmt::Debug for ReprFlags {
105    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
106        bitflags::parser::to_writer(self, f)
107    }
108}
109
110#[derive(#[automatically_derived]
impl ::core::marker::Copy for IntegerType { }Copy, #[automatically_derived]
impl ::core::clone::Clone for IntegerType {
    #[inline]
    fn clone(&self) -> IntegerType {
        let _: ::core::clone::AssertParamIsClone<bool>;
        let _: ::core::clone::AssertParamIsClone<Integer>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for IntegerType {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            IntegerType::Pointer(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f,
                    "Pointer", &__self_0),
            IntegerType::Fixed(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Fixed",
                    __self_0, &__self_1),
        }
    }
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for IntegerType {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Integer>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for IntegerType {
    #[inline]
    fn eq(&self, other: &IntegerType) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (IntegerType::Pointer(__self_0),
                    IntegerType::Pointer(__arg1_0)) => __self_0 == __arg1_0,
                (IntegerType::Fixed(__self_0, __self_1),
                    IntegerType::Fixed(__arg1_0, __arg1_1)) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq)]
111#[cfg_attr(
112    feature = "nightly",
113    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for IntegerType {
            fn encode(&self, __encoder: &mut __E) {
                let disc =
                    match *self {
                        IntegerType::Pointer(ref __binding_0) => { 0usize }
                        IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                            1usize
                        }
                    };
                ::rustc_serialize::Encoder::emit_u8(__encoder, disc as u8);
                match *self {
                    IntegerType::Pointer(ref __binding_0) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                    IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_1,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for IntegerType {
            fn decode(__decoder: &mut __D) -> Self {
                match ::rustc_serialize::Decoder::read_u8(__decoder) as usize
                    {
                    0usize => {
                        IntegerType::Pointer(::rustc_serialize::Decodable::decode(__decoder))
                    }
                    1usize => {
                        IntegerType::Fixed(::rustc_serialize::Decodable::decode(__decoder),
                            ::rustc_serialize::Decodable::decode(__decoder))
                    }
                    n => {
                        ::core::panicking::panic_fmt(format_args!("invalid enum variant tag while decoding `IntegerType`, expected 0..2, actual {0}",
                                n));
                    }
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for IntegerType where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    IntegerType::Pointer(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
114)]
115pub enum IntegerType {
116    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
117    /// `Pointer(true)` means `isize`.
118    Pointer(bool),
119    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
120    /// `Fixed(I8, false)` means `u8`.
121    Fixed(Integer, bool),
122}
123
124impl IntegerType {
125    pub fn is_signed(&self) -> bool {
126        match self {
127            IntegerType::Pointer(b) => *b,
128            IntegerType::Fixed(_, b) => *b,
129        }
130    }
131}
132
// The scraped text had the macro-expanded derive impls pasted inside the `derive`
// attributes, which is not valid Rust. The expansions identify the canonical
// derive lists restored here.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum ScalableElt {
    /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector
    ElementCount(u16),
    /// `rustc_scalable_vector` w/out `N`, used for tuple types of scalable vectors that only
    /// contain other scalable vectors
    Container,
}
145
146/// Represents the repr options provided by the user.
147#[derive(#[automatically_derived]
impl ::core::marker::Copy for ReprOptions { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ReprOptions {
    #[inline]
    fn clone(&self) -> ReprOptions {
        let _: ::core::clone::AssertParamIsClone<Option<IntegerType>>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        let _: ::core::clone::AssertParamIsClone<Option<Align>>;
        let _: ::core::clone::AssertParamIsClone<ReprFlags>;
        let _: ::core::clone::AssertParamIsClone<Option<ScalableElt>>;
        let _: ::core::clone::AssertParamIsClone<Hash64>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for ReprOptions {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["int", "align", "pack", "flags", "scalable",
                        "field_shuffle_seed"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&self.int, &self.align, &self.pack, &self.flags, &self.scalable,
                        &&self.field_shuffle_seed];
        ::core::fmt::Formatter::debug_struct_fields_finish(f, "ReprOptions",
            names, values)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for ReprOptions {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Option<IntegerType>>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<ReprFlags>;
        let _: ::core::cmp::AssertParamIsEq<Option<ScalableElt>>;
        let _: ::core::cmp::AssertParamIsEq<Hash64>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for ReprOptions {
    #[inline]
    fn eq(&self, other: &ReprOptions) -> bool {
        self.int == other.int && self.align == other.align &&
                        self.pack == other.pack && self.flags == other.flags &&
                self.scalable == other.scalable &&
            self.field_shuffle_seed == other.field_shuffle_seed
    }
}PartialEq, #[automatically_derived]
impl ::core::default::Default for ReprOptions {
    #[inline]
    fn default() -> ReprOptions {
        ReprOptions {
            int: ::core::default::Default::default(),
            align: ::core::default::Default::default(),
            pack: ::core::default::Default::default(),
            flags: ::core::default::Default::default(),
            scalable: ::core::default::Default::default(),
            field_shuffle_seed: ::core::default::Default::default(),
        }
    }
}Default)]
148#[cfg_attr(
149    feature = "nightly",
150    derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for ReprOptions {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    ReprOptions {
                        int: ref __binding_0,
                        align: ref __binding_1,
                        pack: ref __binding_2,
                        flags: ref __binding_3,
                        scalable: ref __binding_4,
                        field_shuffle_seed: ref __binding_5 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_1,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_2,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_3,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_4,
                            __encoder);
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_5,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for ReprOptions {
            fn decode(__decoder: &mut __D) -> Self {
                ReprOptions {
                    int: ::rustc_serialize::Decodable::decode(__decoder),
                    align: ::rustc_serialize::Decodable::decode(__decoder),
                    pack: ::rustc_serialize::Decodable::decode(__decoder),
                    flags: ::rustc_serialize::Decodable::decode(__decoder),
                    scalable: ::rustc_serialize::Decodable::decode(__decoder),
                    field_shuffle_seed: ::rustc_serialize::Decodable::decode(__decoder),
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for ReprOptions where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    ReprOptions {
                        int: ref __binding_0,
                        align: ref __binding_1,
                        pack: ref __binding_2,
                        flags: ref __binding_3,
                        scalable: ref __binding_4,
                        field_shuffle_seed: ref __binding_5 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic)
151)]
152pub struct ReprOptions {
153    pub int: Option<IntegerType>,
154    pub align: Option<Align>,
155    pub pack: Option<Align>,
156    pub flags: ReprFlags,
157    /// `#[rustc_scalable_vector]`
158    pub scalable: Option<ScalableElt>,
159    /// The seed to be used for randomizing a type's layout
160    ///
161    /// Note: This could technically be a `u128` which would
162    /// be the "most accurate" hash as it'd encompass the item and crate
163    /// hash without loss, but it does pay the price of being larger.
164    /// Everything's a tradeoff, a 64-bit seed should be sufficient for our
165    /// purposes (primarily `-Z randomize-layout`)
166    pub field_shuffle_seed: Hash64,
167}
168
169impl ReprOptions {
170    #[inline]
171    pub fn simd(&self) -> bool {
172        self.flags.contains(ReprFlags::IS_SIMD)
173    }
174
175    #[inline]
176    pub fn scalable(&self) -> bool {
177        self.flags.contains(ReprFlags::IS_SCALABLE)
178    }
179
180    #[inline]
181    pub fn c(&self) -> bool {
182        self.flags.contains(ReprFlags::IS_C)
183    }
184
185    #[inline]
186    pub fn packed(&self) -> bool {
187        self.pack.is_some()
188    }
189
190    #[inline]
191    pub fn transparent(&self) -> bool {
192        self.flags.contains(ReprFlags::IS_TRANSPARENT)
193    }
194
195    #[inline]
196    pub fn linear(&self) -> bool {
197        self.flags.contains(ReprFlags::IS_LINEAR)
198    }
199
200    /// Returns the discriminant type, given these `repr` options.
201    /// This must only be called on enums!
202    ///
203    /// This is the "typeck type" of the discriminant, which is effectively the maximum size:
204    /// discriminant values will be wrapped to fit (with a lint). Layout can later decide to use a
205    /// smaller type for the tag that stores the discriminant at runtime and that will work just
206    /// fine, it just induces casts when getting/setting the discriminant.
207    pub fn discr_type(&self) -> IntegerType {
208        self.int.unwrap_or(IntegerType::Pointer(true))
209    }
210
211    /// Returns `true` if this `#[repr()]` should inhabit "smart enum
212    /// layout" optimizations, such as representing `Foo<&T>` as a
213    /// single pointer.
214    pub fn inhibit_enum_layout_opt(&self) -> bool {
215        self.c() || self.int.is_some()
216    }
217
218    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
219        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
220    }
221
222    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
223    /// e.g. `repr(C)` or `repr(<int>)`.
224    pub fn inhibit_struct_field_reordering(&self) -> bool {
225        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
226    }
227
228    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
229    /// was enabled for its declaration crate.
230    pub fn can_randomize_type_layout(&self) -> bool {
231        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
232    }
233
234    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
235    pub fn inhibits_union_abi_opt(&self) -> bool {
236        self.c()
237    }
238}
239
/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer,
///   so the largest representable lane count is 2^15.
pub const MAX_SIMD_LANES: u64 = 1u64 << 15;
246
247/// How pointers are represented in a given address space
248#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointerSpec { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointerSpec {
    #[inline]
    fn clone(&self) -> PointerSpec {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointerSpec {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(f, "PointerSpec",
            "pointer_size", &self.pointer_size, "pointer_align",
            &self.pointer_align, "pointer_offset", &self.pointer_offset,
            "_is_fat", &&self._is_fat)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for PointerSpec {
    #[inline]
    fn eq(&self, other: &PointerSpec) -> bool {
        self._is_fat == other._is_fat &&
                    self.pointer_size == other.pointer_size &&
                self.pointer_align == other.pointer_align &&
            self.pointer_offset == other.pointer_offset
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for PointerSpec {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq)]
249pub struct PointerSpec {
250    /// The size of the bitwise representation of the pointer.
251    pointer_size: Size,
252    /// The alignment of pointers for this address space
253    pointer_align: Align,
254    /// The size of the value a pointer can be offset by in this address space.
255    pointer_offset: Size,
256    /// Pointers into this address space contain extra metadata
257    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
258    _is_fat: bool,
259}
260
261/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
262/// for a target, which contains everything needed to compute layouts.
263#[derive(#[automatically_derived]
impl ::core::fmt::Debug for TargetDataLayout {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["endian", "i1_align", "i8_align", "i16_align", "i32_align",
                        "i64_align", "i128_align", "f16_align", "f32_align",
                        "f64_align", "f128_align", "aggregate_align",
                        "vector_align", "default_address_space",
                        "default_address_space_pointer_spec", "address_space_info",
                        "instruction_address_space", "c_enum_min_size"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&self.endian, &self.i1_align, &self.i8_align, &self.i16_align,
                        &self.i32_align, &self.i64_align, &self.i128_align,
                        &self.f16_align, &self.f32_align, &self.f64_align,
                        &self.f128_align, &self.aggregate_align, &self.vector_align,
                        &self.default_address_space,
                        &self.default_address_space_pointer_spec,
                        &self.address_space_info, &self.instruction_address_space,
                        &&self.c_enum_min_size];
        ::core::fmt::Formatter::debug_struct_fields_finish(f,
            "TargetDataLayout", names, values)
    }
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for TargetDataLayout {
    #[inline]
    fn eq(&self, other: &TargetDataLayout) -> bool {
        self.endian == other.endian && self.i1_align == other.i1_align &&
                                                                        self.i8_align == other.i8_align &&
                                                                    self.i16_align == other.i16_align &&
                                                                self.i32_align == other.i32_align &&
                                                            self.i64_align == other.i64_align &&
                                                        self.i128_align == other.i128_align &&
                                                    self.f16_align == other.f16_align &&
                                                self.f32_align == other.f32_align &&
                                            self.f64_align == other.f64_align &&
                                        self.f128_align == other.f128_align &&
                                    self.aggregate_align == other.aggregate_align &&
                                self.vector_align == other.vector_align &&
                            self.default_address_space == other.default_address_space &&
                        self.default_address_space_pointer_spec ==
                            other.default_address_space_pointer_spec &&
                    self.address_space_info == other.address_space_info &&
                self.instruction_address_space ==
                    other.instruction_address_space &&
            self.c_enum_min_size == other.c_enum_min_size
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for TargetDataLayout {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Endian>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<Vec<(Size, Align)>>;
        let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
        let _: ::core::cmp::AssertParamIsEq<PointerSpec>;
        let _: ::core::cmp::AssertParamIsEq<Vec<(AddressSpace, PointerSpec)>>;
        let _: ::core::cmp::AssertParamIsEq<Integer>;
    }
}Eq)]
264pub struct TargetDataLayout {
265    pub endian: Endian,
266    pub i1_align: Align,
267    pub i8_align: Align,
268    pub i16_align: Align,
269    pub i32_align: Align,
270    pub i64_align: Align,
271    pub i128_align: Align,
272    pub f16_align: Align,
273    pub f32_align: Align,
274    pub f64_align: Align,
275    pub f128_align: Align,
276    pub aggregate_align: Align,
277
278    /// Alignments for vector types.
279    pub vector_align: Vec<(Size, Align)>,
280
281    pub default_address_space: AddressSpace,
282    pub default_address_space_pointer_spec: PointerSpec,
283
284    /// Address space information of all known address spaces.
285    ///
286    /// # Note
287    ///
288    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
289    /// which instead lives in [`Self::default_address_space_pointer_spec`].
290    address_space_info: Vec<(AddressSpace, PointerSpec)>,
291
292    pub instruction_address_space: AddressSpace,
293
294    /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
295    /// Note: This isn't in LLVM's data layout string, it is `short_enum`
296    /// so the only valid spec for LLVM is c_int::BITS or 8
297    pub c_enum_min_size: Integer,
298}
299
300impl Default for TargetDataLayout {
301    /// Creates an instance of `TargetDataLayout`.
302    fn default() -> TargetDataLayout {
303        let align = |bits| Align::from_bits(bits).unwrap();
304        TargetDataLayout {
305            endian: Endian::Big,
306            i1_align: align(8),
307            i8_align: align(8),
308            i16_align: align(16),
309            i32_align: align(32),
310            i64_align: align(32),
311            i128_align: align(32),
312            f16_align: align(16),
313            f32_align: align(32),
314            f64_align: align(64),
315            f128_align: align(128),
316            aggregate_align: align(8),
317            vector_align: ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [(Size::from_bits(64), align(64)),
                (Size::from_bits(128), align(128))]))vec![
318                (Size::from_bits(64), align(64)),
319                (Size::from_bits(128), align(128)),
320            ],
321            default_address_space: AddressSpace::ZERO,
322            default_address_space_pointer_spec: PointerSpec {
323                pointer_size: Size::from_bits(64),
324                pointer_align: align(64),
325                pointer_offset: Size::from_bits(64),
326                _is_fat: false,
327            },
328            address_space_info: ::alloc::vec::Vec::new()vec![],
329            instruction_address_space: AddressSpace::ZERO,
330            c_enum_min_size: Integer::I32,
331        }
332    }
333}
334
/// Errors produced while parsing a target data-layout string
/// (see [`TargetDataLayout::parse_from_llvm_datalayout_string`]).
pub enum TargetDataLayoutErrors<'a> {
    /// An address-space index in the spec failed to parse as a `u32`.
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    /// A bit count (`kind` is e.g. "size" or "alignment") failed to parse as a `u64`.
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    /// A spec component that requires an alignment had none.
    MissingAlignment { cause: &'a str },
    /// An alignment value was rejected by `Align::from_bits`.
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    /// NOTE(review): emitted outside this chunk — presumably when the data-layout
    /// string disagrees with the target's declared architecture; confirm at call site.
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    /// NOTE(review): emitted outside this chunk — presumably when the parsed pointer
    /// size disagrees with the target's declared pointer width; confirm at call site.
    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
    /// NOTE(review): emitted outside this chunk; carries a preformatted message.
    InvalidBitsSize { err: String },
    /// A `p…` pointer spec had an unrecognized alphabetic prefix (only `f` for
    /// fat pointers is understood).
    UnknownPointerSpecification { err: String },
}
345
346impl TargetDataLayout {
347    /// Parse data layout from an
348    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
349    ///
350    /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
351    /// determined from llvm string.
352    pub fn parse_from_llvm_datalayout_string<'a>(
353        input: &'a str,
354        default_address_space: AddressSpace,
355    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
356        // Parse an address space index from a string.
357        let parse_address_space = |s: &'a str, cause: &'a str| {
358            s.parse::<u32>().map(AddressSpace).map_err(|err| {
359                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
360            })
361        };
362
363        // Parse a bit count from a string.
364        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
365            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
366                kind,
367                bit: s,
368                cause,
369                err,
370            })
371        };
372
373        // Parse a size string.
374        let parse_size =
375            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
376
377        // Parse an alignment string.
378        let parse_align_str = |s: &'a str, cause: &'a str| {
379            let align_from_bits = |bits| {
380                Align::from_bits(bits)
381                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
382            };
383            let abi = parse_bits(s, "alignment", cause)?;
384            Ok(align_from_bits(abi)?)
385        };
386
387        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
388        // ignoring the secondary alignment specifications.
389        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
390            if s.is_empty() {
391                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
392            }
393            parse_align_str(s[0], cause)
394        };
395
396        let mut dl = TargetDataLayout::default();
397        dl.default_address_space = default_address_space;
398
399        let mut i128_align_src = 64;
400        for spec in input.split('-') {
401            let spec_parts = spec.split(':').collect::<Vec<_>>();
402
403            match &*spec_parts {
404                ["e"] => dl.endian = Endian::Little,
405                ["E"] => dl.endian = Endian::Big,
406                [p] if p.starts_with('P') => {
407                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
408                }
409                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
410                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
411                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
412                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
413                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
414                [p, s, a @ ..] if p.starts_with("p") => {
415                    let mut p = p.strip_prefix('p').unwrap();
416                    let mut _is_fat = false;
417
418                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
419                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
420
421                    if p.starts_with('f') {
422                        p = p.strip_prefix('f').unwrap();
423                        _is_fat = true;
424                    }
425
426                    // However, we currently don't take into account further specifications:
427                    // an error is emitted instead.
428                    if p.starts_with(char::is_alphabetic) {
429                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
430                            err: p.to_string(),
431                        });
432                    }
433
434                    let addr_space = if !p.is_empty() {
435                        parse_address_space(p, "p-")?
436                    } else {
437                        AddressSpace::ZERO
438                    };
439
440                    let pointer_size = parse_size(s, "p-")?;
441                    let pointer_align = parse_align_seq(a, "p-")?;
442                    let info = PointerSpec {
443                        pointer_offset: pointer_size,
444                        pointer_size,
445                        pointer_align,
446                        _is_fat,
447                    };
448                    if addr_space == default_address_space {
449                        dl.default_address_space_pointer_spec = info;
450                    } else {
451                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
452                            Some(e) => e.1 = info,
453                            None => {
454                                dl.address_space_info.push((addr_space, info));
455                            }
456                        }
457                    }
458                }
459                [p, s, a, _pr, i] if p.starts_with("p") => {
460                    let mut p = p.strip_prefix('p').unwrap();
461                    let mut _is_fat = false;
462
463                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
464                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
465
466                    if p.starts_with('f') {
467                        p = p.strip_prefix('f').unwrap();
468                        _is_fat = true;
469                    }
470
471                    // However, we currently don't take into account further specifications:
472                    // an error is emitted instead.
473                    if p.starts_with(char::is_alphabetic) {
474                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
475                            err: p.to_string(),
476                        });
477                    }
478
479                    let addr_space = if !p.is_empty() {
480                        parse_address_space(p, "p")?
481                    } else {
482                        AddressSpace::ZERO
483                    };
484
485                    let info = PointerSpec {
486                        pointer_size: parse_size(s, "p-")?,
487                        pointer_align: parse_align_str(a, "p-")?,
488                        pointer_offset: parse_size(i, "p-")?,
489                        _is_fat,
490                    };
491
492                    if addr_space == default_address_space {
493                        dl.default_address_space_pointer_spec = info;
494                    } else {
495                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
496                            Some(e) => e.1 = info,
497                            None => {
498                                dl.address_space_info.push((addr_space, info));
499                            }
500                        }
501                    }
502                }
503
504                [s, a @ ..] if s.starts_with('i') => {
505                    let Ok(bits) = s[1..].parse::<u64>() else {
506                        parse_size(&s[1..], "i")?; // For the user error.
507                        continue;
508                    };
509                    let a = parse_align_seq(a, s)?;
510                    match bits {
511                        1 => dl.i1_align = a,
512                        8 => dl.i8_align = a,
513                        16 => dl.i16_align = a,
514                        32 => dl.i32_align = a,
515                        64 => dl.i64_align = a,
516                        _ => {}
517                    }
518                    if bits >= i128_align_src && bits <= 128 {
519                        // Default alignment for i128 is decided by taking the alignment of
520                        // largest-sized i{64..=128}.
521                        i128_align_src = bits;
522                        dl.i128_align = a;
523                    }
524                }
525                [s, a @ ..] if s.starts_with('v') => {
526                    let v_size = parse_size(&s[1..], "v")?;
527                    let a = parse_align_seq(a, s)?;
528                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
529                        v.1 = a;
530                        continue;
531                    }
532                    // No existing entry, add a new one.
533                    dl.vector_align.push((v_size, a));
534                }
535                _ => {} // Ignore everything else.
536            }
537        }
538
539        // Inherit, if not given, address space information for specific LLVM elements from the
540        // default data address space.
541        if (dl.instruction_address_space != dl.default_address_space)
542            && dl
543                .address_space_info
544                .iter()
545                .find(|(a, _)| *a == dl.instruction_address_space)
546                .is_none()
547        {
548            dl.address_space_info.push((
549                dl.instruction_address_space,
550                dl.default_address_space_pointer_spec.clone(),
551            ));
552        }
553
554        Ok(dl)
555    }
556
557    /// Returns **exclusive** upper bound on object size in bytes, in the default data address
558    /// space.
559    ///
560    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
561    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
562    /// index every address within an object along with one byte past the end, along with allowing
563    /// `isize` to store the difference between any two pointers into an object.
564    ///
565    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
566    /// so we adopt such a more-constrained size bound due to its technical limitations.
567    #[inline]
568    pub fn obj_size_bound(&self) -> u64 {
569        match self.pointer_size().bits() {
570            16 => 1 << 15,
571            32 => 1 << 31,
572            64 => 1 << 61,
573            bits => {
    ::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
            bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
574        }
575    }
576
577    /// Returns **exclusive** upper bound on object size in bytes.
578    ///
579    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
580    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
581    /// index every address within an object along with one byte past the end, along with allowing
582    /// `isize` to store the difference between any two pointers into an object.
583    ///
584    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
585    /// so we adopt such a more-constrained size bound due to its technical limitations.
586    #[inline]
587    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
588        match self.pointer_size_in(address_space).bits() {
589            16 => 1 << 15,
590            32 => 1 << 31,
591            64 => 1 << 61,
592            bits => {
    ::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
            bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
593        }
594    }
595
596    #[inline]
597    pub fn ptr_sized_integer(&self) -> Integer {
598        use Integer::*;
599        match self.pointer_offset().bits() {
600            16 => I16,
601            32 => I32,
602            64 => I64,
603            bits => {
    ::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
            bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
604        }
605    }
606
607    #[inline]
608    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
609        use Integer::*;
610        match self.pointer_offset_in(address_space).bits() {
611            16 => I16,
612            32 => I32,
613            64 => I64,
614            bits => {
    ::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
            bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
615        }
616    }
617
618    /// psABI-mandated alignment for a vector type, if any
619    #[inline]
620    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
621        self.vector_align
622            .iter()
623            .find(|(size, _align)| *size == vec_size)
624            .map(|(_size, align)| *align)
625    }
626
627    /// an alignment resembling the one LLVM would pick for a vector
628    #[inline]
629    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
630        self.cabi_vector_align(vec_size)
631            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
632    }
633
    /// Get the pointer size in the default data address space.
    #[inline]
    pub fn pointer_size(&self) -> Size {
        // Straight read from the cached spec of the default address space.
        self.default_address_space_pointer_spec.pointer_size
    }
639
640    /// Get the pointer size in a specific address space.
641    #[inline]
642    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
643        if c == self.default_address_space {
644            return self.default_address_space_pointer_spec.pointer_size;
645        }
646
647        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
648            e.1.pointer_size
649        } else {
650            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
651        }
652    }
653
    /// Get the pointer index in the default data address space.
    #[inline]
    pub fn pointer_offset(&self) -> Size {
        // Straight read from the cached spec of the default address space.
        self.default_address_space_pointer_spec.pointer_offset
    }
659
660    /// Get the pointer index in a specific address space.
661    #[inline]
662    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
663        if c == self.default_address_space {
664            return self.default_address_space_pointer_spec.pointer_offset;
665        }
666
667        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
668            e.1.pointer_offset
669        } else {
670            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
671        }
672    }
673
    /// Get the pointer alignment in the default data address space.
    #[inline]
    pub fn pointer_align(&self) -> AbiAlign {
        // Wraps the cached alignment of the default address space.
        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
    }
679
680    /// Get the pointer alignment in a specific address space.
681    #[inline]
682    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
683        AbiAlign::new(if c == self.default_address_space {
684            self.default_address_space_pointer_spec.pointer_align
685        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
686            e.1.pointer_align
687        } else {
688            {
    ::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
            c));
};panic!("Use of unknown address space {c:?}");
689        })
690    }
691}
692
/// Anything that can supply a [`TargetDataLayout`] reference.
pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}
696
impl HasDataLayout for TargetDataLayout {
    // A data layout trivially provides itself.
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}
703
// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    // Delegates through the extra layer of reference.
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}
711
/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}
718
719impl Endian {
720    pub fn as_str(&self) -> &'static str {
721        match self {
722            Self::Little => "little",
723            Self::Big => "big",
724        }
725    }
726}
727
728impl fmt::Debug for Endian {
729    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
730        f.write_str(self.as_str())
731    }
732}
733
734impl FromStr for Endian {
735    type Err = String;
736
737    fn from_str(s: &str) -> Result<Self, Self::Err> {
738        match s {
739            "little" => Ok(Self::Little),
740            "big" => Ok(Self::Big),
741            _ => Err(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("unknown endian: \"{0}\"", s))
    })format!(r#"unknown endian: "{s}""#)),
742        }
743    }
744}
745
/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    // Invariant-free raw byte count; kept private so construction goes
    // through `from_bytes`/`from_bits`.
    raw: u64,
}
755
#[cfg(feature = "nightly")]
// Marks `Size`'s ordering as stable across (de-)serialization for sorting purposes.
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}
764
765// This is debug-printed a lot in larger structs, don't waste too much space there
766impl fmt::Debug for Size {
767    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
768        f.write_fmt(format_args!("Size({0} bytes)", self.bytes()))write!(f, "Size({} bytes)", self.bytes())
769    }
770}
771
772impl Size {
773    pub const ZERO: Size = Size { raw: 0 };
774
775    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
776    /// not a multiple of 8.
777    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
778        let bits = bits.try_into().ok().unwrap();
779        Size { raw: bits.div_ceil(8) }
780    }
781
782    #[inline]
783    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
784        let bytes: u64 = bytes.try_into().ok().unwrap();
785        Size { raw: bytes }
786    }
787
788    #[inline]
789    pub fn bytes(self) -> u64 {
790        self.raw
791    }
792
793    #[inline]
794    pub fn bytes_usize(self) -> usize {
795        self.bytes().try_into().unwrap()
796    }
797
798    #[inline]
799    pub fn bits(self) -> u64 {
800        #[cold]
801        fn overflow(bytes: u64) -> ! {
802            {
    ::core::panicking::panic_fmt(format_args!("Size::bits: {0} bytes in bits doesn\'t fit in u64",
            bytes));
}panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
803        }
804
805        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
806    }
807
808    #[inline]
809    pub fn bits_usize(self) -> usize {
810        self.bits().try_into().unwrap()
811    }
812
813    #[inline]
814    pub fn align_to(self, align: Align) -> Size {
815        let mask = align.bytes() - 1;
816        Size::from_bytes((self.bytes() + mask) & !mask)
817    }
818
819    #[inline]
820    pub fn is_aligned(self, align: Align) -> bool {
821        let mask = align.bytes() - 1;
822        self.bytes() & mask == 0
823    }
824
825    #[inline]
826    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
827        let dl = cx.data_layout();
828
829        let bytes = self.bytes().checked_add(offset.bytes())?;
830
831        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
832    }
833
834    #[inline]
835    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
836        let dl = cx.data_layout();
837
838        let bytes = self.bytes().checked_mul(count)?;
839        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
840    }
841
842    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
843    /// (i.e., if it is negative, fill with 1's on the left).
844    #[inline]
845    pub fn sign_extend(self, value: u128) -> i128 {
846        let size = self.bits();
847        if size == 0 {
848            // Truncated until nothing is left.
849            return 0;
850        }
851        // Sign-extend it.
852        let shift = 128 - size;
853        // Shift the unsigned value to the left, then shift back to the right as signed
854        // (essentially fills with sign bit on the left).
855        ((value << shift) as i128) >> shift
856    }
857
858    /// Truncates `value` to `self` bits.
859    #[inline]
860    pub fn truncate(self, value: u128) -> u128 {
861        let size = self.bits();
862        if size == 0 {
863            // Truncated until nothing is left.
864            return 0;
865        }
866        let shift = 128 - size;
867        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
868        (value << shift) >> shift
869    }
870
871    #[inline]
872    pub fn signed_int_min(&self) -> i128 {
873        self.sign_extend(1_u128 << (self.bits() - 1))
874    }
875
876    #[inline]
877    pub fn signed_int_max(&self) -> i128 {
878        i128::MAX >> (128 - self.bits())
879    }
880
881    #[inline]
882    pub fn unsigned_int_max(&self) -> u128 {
883        u128::MAX >> (128 - self.bits())
884    }
885}
886
887// Panicking addition, subtraction and multiplication for convenience.
888// Avoid during layout computation, return `LayoutError` instead.
889
890impl Add for Size {
891    type Output = Size;
892    #[inline]
893    fn add(self, other: Size) -> Size {
894        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
895            {
    ::core::panicking::panic_fmt(format_args!("Size::add: {0} + {1} doesn\'t fit in u64",
            self.bytes(), other.bytes()));
}panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
896        }))
897    }
898}
899
900impl Sub for Size {
901    type Output = Size;
902    #[inline]
903    fn sub(self, other: Size) -> Size {
904        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
905            {
    ::core::panicking::panic_fmt(format_args!("Size::sub: {0} - {1} would result in negative size",
            self.bytes(), other.bytes()));
}panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
906        }))
907    }
908}
909
impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        // Commutative case: defer to the `Size * u64` impl.
        size * self
    }
}
917
918impl Mul<u64> for Size {
919    type Output = Size;
920    #[inline]
921    fn mul(self, count: u64) -> Size {
922        match self.bytes().checked_mul(count) {
923            Some(bytes) => Size::from_bytes(bytes),
924            None => {
    ::core::panicking::panic_fmt(format_args!("Size::mul: {0} * {1} doesn\'t fit in u64",
            self.bytes(), count));
}panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
925        }
926    }
927}
928
impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        // Reuses the panicking `Add` impl, so this also panics on overflow.
        *self = *self + other;
    }
}
935
#[cfg(feature = "nightly")]
// Lets `Size` be used in ranges (`start..end`), delegating all stepping
// arithmetic to the underlying `u64` byte count.
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: delegated to `u64::forward_unchecked` under the same `Step`
        // contract our caller must uphold.
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: delegated to `u64::backward_unchecked` under the same `Step`
        // contract our caller must uphold.
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}
973
/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    // Stored as the exponent: alignment in bytes is `1 << pow2`. Kept private
    // so construction goes through `from_bytes`/`from_bits`, which validate it.
    pow2: u8,
}
983
984// This is debug-printed a lot in larger structs, don't waste too much space there
985impl fmt::Debug for Align {
986    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
987        f.write_fmt(format_args!("Align({0} bytes)", self.bytes()))write!(f, "Align({} bytes)", self.bytes())
988    }
989}
990
/// Error returned by [`Align::from_bytes`] and [`Align::from_bits`].
#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}
996
impl fmt::Debug for AlignFromBytesError {
    // Debug output intentionally matches `Display`; delegating via
    // `Display::fmt` also preserves any formatter flags.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
1002
1003impl fmt::Display for AlignFromBytesError {
1004    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1005        match self {
1006            AlignFromBytesError::NotPowerOfTwo(align) => f.write_fmt(format_args!("{0} is not a power of 2", align))write!(f, "{align} is not a power of 2"),
1007            AlignFromBytesError::TooLarge(align) => f.write_fmt(format_args!("{0} is too large", align))write!(f, "{align} is too large"),
1008        }
1009    }
1010}
1011
impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29, we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    /// Either `1 << (pointer_bits - 1)` or [`Align::MAX`], whichever is smaller.
    #[inline]
    pub fn max_for_target(tdl: &TargetDataLayout) -> Align {
        let pointer_bits = tdl.pointer_size().bits();
        // Fall back to MAX both when the bit count exceeds `u8` and when
        // `pow2` would exceed LLVM's supported maximum.
        if let Ok(pointer_bits) = u8::try_from(pointer_bits)
            && pointer_bits <= Align::MAX.pow2
        {
            Align { pow2: pointer_bits - 1 }
        } else {
            Align::MAX
        }
    }

    /// Converts a bit count to an alignment, rounding bits up to a whole byte first.
    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    /// Builds an `Align` from a byte count, which must be a power of two
    /// no larger than 2^29 (0 is accepted and treated as 1).
    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        // Out-of-line cold constructors keep the validation fast path small.
        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        // A power of two equals 1 shifted by its trailing-zero count.
        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    /// The alignment in bytes (`1 << pow2`).
    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    /// The alignment in bits.
    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtain the greatest factor of `size` that is an alignment
    /// (the largest power of two the Size is a multiple of).
    ///
    /// Note that all numbers are factors of 0
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces Align to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}
1101
1102/// A pair of alignments, ABI-mandated and preferred.
1103///
1104/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
1105/// it is not exposed semantically to programmers nor can they meaningfully affect it.
1106/// The only concern for us is that preferred alignment must not be less than the mandated alignment
1107/// and thus in practice the two values are almost always identical.
1108///
1109/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
1110/// It is of effectively no consequence for layout in structs and on the stack.
1111#[derive(#[automatically_derived]
impl ::core::marker::Copy for AbiAlign { }Copy, #[automatically_derived]
impl ::core::clone::Clone for AbiAlign {
    #[inline]
    fn clone(&self) -> AbiAlign {
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for AbiAlign {
    #[inline]
    fn eq(&self, other: &AbiAlign) -> bool { self.abi == other.abi }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for AbiAlign {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Align>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for AbiAlign {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.abi, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for AbiAlign {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field1_finish(f, "AbiAlign",
            "abi", &&self.abi)
    }
}Debug)]
1112#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for AbiAlign where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    AbiAlign { abi: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1113pub struct AbiAlign {
1114    pub abi: Align,
1115}
1116
1117impl AbiAlign {
1118    #[inline]
1119    pub fn new(align: Align) -> AbiAlign {
1120        AbiAlign { abi: align }
1121    }
1122
1123    #[inline]
1124    pub fn min(self, other: AbiAlign) -> AbiAlign {
1125        AbiAlign { abi: self.abi.min(other.abi) }
1126    }
1127
1128    #[inline]
1129    pub fn max(self, other: AbiAlign) -> AbiAlign {
1130        AbiAlign { abi: self.abi.max(other.abi) }
1131    }
1132}
1133
// Forward all `Align` methods through `AbiAlign`, so callers can treat an
// `AbiAlign` as its single `abi` field.
impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}
1141
1142/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}
1155
1156impl Integer {
1157    pub fn int_ty_str(self) -> &'static str {
1158        use Integer::*;
1159        match self {
1160            I8 => "i8",
1161            I16 => "i16",
1162            I32 => "i32",
1163            I64 => "i64",
1164            I128 => "i128",
1165        }
1166    }
1167
1168    pub fn uint_ty_str(self) -> &'static str {
1169        use Integer::*;
1170        match self {
1171            I8 => "u8",
1172            I16 => "u16",
1173            I32 => "u32",
1174            I64 => "u64",
1175            I128 => "u128",
1176        }
1177    }
1178
1179    #[inline]
1180    pub fn size(self) -> Size {
1181        use Integer::*;
1182        match self {
1183            I8 => Size::from_bytes(1),
1184            I16 => Size::from_bytes(2),
1185            I32 => Size::from_bytes(4),
1186            I64 => Size::from_bytes(8),
1187            I128 => Size::from_bytes(16),
1188        }
1189    }
1190
1191    /// Gets the Integer type from an IntegerType.
1192    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
1193        let dl = cx.data_layout();
1194
1195        match ity {
1196            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
1197            IntegerType::Fixed(x, _) => x,
1198        }
1199    }
1200
1201    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1202        use Integer::*;
1203        let dl = cx.data_layout();
1204
1205        AbiAlign::new(match self {
1206            I8 => dl.i8_align,
1207            I16 => dl.i16_align,
1208            I32 => dl.i32_align,
1209            I64 => dl.i64_align,
1210            I128 => dl.i128_align,
1211        })
1212    }
1213
1214    /// Returns the largest signed value that can be represented by this Integer.
1215    #[inline]
1216    pub fn signed_max(self) -> i128 {
1217        use Integer::*;
1218        match self {
1219            I8 => i8::MAX as i128,
1220            I16 => i16::MAX as i128,
1221            I32 => i32::MAX as i128,
1222            I64 => i64::MAX as i128,
1223            I128 => i128::MAX,
1224        }
1225    }
1226
1227    /// Returns the smallest signed value that can be represented by this Integer.
1228    #[inline]
1229    pub fn signed_min(self) -> i128 {
1230        use Integer::*;
1231        match self {
1232            I8 => i8::MIN as i128,
1233            I16 => i16::MIN as i128,
1234            I32 => i32::MIN as i128,
1235            I64 => i64::MIN as i128,
1236            I128 => i128::MIN,
1237        }
1238    }
1239
1240    /// Finds the smallest Integer type which can represent the signed value.
1241    #[inline]
1242    pub fn fit_signed(x: i128) -> Integer {
1243        use Integer::*;
1244        match x {
1245            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
1246            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
1247            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
1248            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
1249            _ => I128,
1250        }
1251    }
1252
1253    /// Finds the smallest Integer type which can represent the unsigned value.
1254    #[inline]
1255    pub fn fit_unsigned(x: u128) -> Integer {
1256        use Integer::*;
1257        match x {
1258            0..=0x0000_0000_0000_00ff => I8,
1259            0..=0x0000_0000_0000_ffff => I16,
1260            0..=0x0000_0000_ffff_ffff => I32,
1261            0..=0xffff_ffff_ffff_ffff => I64,
1262            _ => I128,
1263        }
1264    }
1265
1266    /// Finds the smallest integer with the given alignment.
1267    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
1268        use Integer::*;
1269        let dl = cx.data_layout();
1270
1271        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
1272            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
1273        })
1274    }
1275
1276    /// Find the largest integer with the given alignment or less.
1277    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
1278        use Integer::*;
1279        let dl = cx.data_layout();
1280
1281        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
1282        for candidate in [I64, I32, I16] {
1283            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
1284                return candidate;
1285            }
1286        }
1287        I8
1288    }
1289
1290    // FIXME(eddyb) consolidate this and other methods that find the appropriate
1291    // `Integer` given some requirements.
1292    #[inline]
1293    pub fn from_size(size: Size) -> Result<Self, String> {
1294        match size.bits() {
1295            8 => Ok(Integer::I8),
1296            16 => Ok(Integer::I16),
1297            32 => Ok(Integer::I32),
1298            64 => Ok(Integer::I64),
1299            128 => Ok(Integer::I128),
1300            _ => Err(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("rust does not support integers with {0} bits",
                size.bits()))
    })format!("rust does not support integers with {} bits", size.bits())),
1301        }
1302    }
1303}
1304
1305/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}
1314
1315impl Float {
1316    pub fn size(self) -> Size {
1317        use Float::*;
1318
1319        match self {
1320            F16 => Size::from_bits(16),
1321            F32 => Size::from_bits(32),
1322            F64 => Size::from_bits(64),
1323            F128 => Size::from_bits(128),
1324        }
1325    }
1326
1327    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1328        use Float::*;
1329        let dl = cx.data_layout();
1330
1331        AbiAlign::new(match self {
1332            F16 => dl.f16_align,
1333            F32 => dl.f32_align,
1334            F64 => dl.f64_align,
1335            F128 => dl.f128_align,
1336        })
1337    }
1338}
1339
1340/// Fundamental unit of memory access and layout.
1341#[derive(#[automatically_derived]
impl ::core::marker::Copy for Primitive { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Primitive {
    #[inline]
    fn clone(&self) -> Primitive {
        let _: ::core::clone::AssertParamIsClone<Integer>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        let _: ::core::clone::AssertParamIsClone<Float>;
        let _: ::core::clone::AssertParamIsClone<AddressSpace>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Primitive {
    #[inline]
    fn eq(&self, other: &Primitive) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Primitive::Int(__self_0, __self_1),
                    Primitive::Int(__arg1_0, __arg1_1)) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (Primitive::Float(__self_0), Primitive::Float(__arg1_0)) =>
                    __self_0 == __arg1_0,
                (Primitive::Pointer(__self_0), Primitive::Pointer(__arg1_0))
                    => __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Primitive {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Integer>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Float>;
        let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Primitive {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Primitive::Int(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            Primitive::Float(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            Primitive::Pointer(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Primitive {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Primitive::Int(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Int",
                    __self_0, &__self_1),
            Primitive::Float(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Float",
                    &__self_0),
            Primitive::Pointer(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f,
                    "Pointer", &__self_0),
        }
    }
}Debug)]
1342#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Primitive where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Primitive::Int(ref __binding_0, ref __binding_1) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    Primitive::Float(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    Primitive::Pointer(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1343pub enum Primitive {
1344    /// The `bool` is the signedness of the `Integer` type.
1345    ///
1346    /// One would think we would not care about such details this low down,
1347    /// but some ABIs are described in terms of C types and ISAs where the
1348    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
1349    /// a negative integer passed by zero-extension will appear positive in
1350    /// the callee, and most operations on it will produce the wrong values.
1351    Int(Integer, bool),
1352    Float(Float),
1353    Pointer(AddressSpace),
1354}
1355
1356impl Primitive {
1357    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
1358        use Primitive::*;
1359        let dl = cx.data_layout();
1360
1361        match self {
1362            Int(i, _) => i.size(),
1363            Float(f) => f.size(),
1364            Pointer(a) => dl.pointer_size_in(a),
1365        }
1366    }
1367
1368    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1369        use Primitive::*;
1370        let dl = cx.data_layout();
1371
1372        match self {
1373            Int(i, _) => i.align(dl),
1374            Float(f) => f.align(dl),
1375            Pointer(a) => dl.pointer_align_in(a),
1376        }
1377    }
1378}
1379
1380/// Inclusive wrap-around range of valid values, that is, if
1381/// start > end, it represents `start..=MAX`, followed by `0..=end`.
1382///
1383/// That is, for an i8 primitive, a range of `254..=2` means following
1384/// sequence:
1385///
1386///    254 (-2), 255 (-1), 0, 1, 2
1387///
1388/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
1389#[derive(#[automatically_derived]
impl ::core::clone::Clone for WrappingRange {
    #[inline]
    fn clone(&self) -> WrappingRange {
        let _: ::core::clone::AssertParamIsClone<u128>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for WrappingRange { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for WrappingRange {
    #[inline]
    fn eq(&self, other: &WrappingRange) -> bool {
        self.start == other.start && self.end == other.end
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for WrappingRange {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u128>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for WrappingRange {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.start, state);
        ::core::hash::Hash::hash(&self.end, state)
    }
}Hash)]
1390#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for WrappingRange where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    WrappingRange { start: ref __binding_0, end: ref __binding_1
                        } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1391pub struct WrappingRange {
1392    pub start: u128,
1393    pub end: u128,
1394}
1395
1396impl WrappingRange {
1397    pub fn full(size: Size) -> Self {
1398        Self { start: 0, end: size.unsigned_int_max() }
1399    }
1400
1401    /// Returns `true` if `v` is contained in the range.
1402    #[inline(always)]
1403    pub fn contains(&self, v: u128) -> bool {
1404        if self.start <= self.end {
1405            self.start <= v && v <= self.end
1406        } else {
1407            self.start <= v || v <= self.end
1408        }
1409    }
1410
1411    /// Returns `true` if all the values in `other` are contained in this range,
1412    /// when the values are considered as having width `size`.
1413    #[inline(always)]
1414    pub fn contains_range(&self, other: Self, size: Size) -> bool {
1415        if self.is_full_for(size) {
1416            true
1417        } else {
1418            let trunc = |x| size.truncate(x);
1419
1420            let delta = self.start;
1421            let max = trunc(self.end.wrapping_sub(delta));
1422
1423            let other_start = trunc(other.start.wrapping_sub(delta));
1424            let other_end = trunc(other.end.wrapping_sub(delta));
1425
1426            // Having shifted both input ranges by `delta`, now we only need to check
1427            // whether `0..=max` contains `other_start..=other_end`, which can only
1428            // happen if the other doesn't wrap since `self` isn't everything.
1429            (other_start <= other_end) && (other_end <= max)
1430        }
1431    }
1432
1433    /// Returns `self` with replaced `start`
1434    #[inline(always)]
1435    fn with_start(mut self, start: u128) -> Self {
1436        self.start = start;
1437        self
1438    }
1439
1440    /// Returns `self` with replaced `end`
1441    #[inline(always)]
1442    fn with_end(mut self, end: u128) -> Self {
1443        self.end = end;
1444        self
1445    }
1446
1447    /// Returns `true` if `size` completely fills the range.
1448    ///
1449    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
1450    /// Niche calculations can produce full ranges which are not the canonical one;
1451    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
1452    #[inline]
1453    fn is_full_for(&self, size: Size) -> bool {
1454        let max_value = size.unsigned_int_max();
1455        if true {
    if !(self.start <= max_value && self.end <= max_value) {
        ::core::panicking::panic("assertion failed: self.start <= max_value && self.end <= max_value")
    };
};debug_assert!(self.start <= max_value && self.end <= max_value);
1456        self.start == (self.end.wrapping_add(1) & max_value)
1457    }
1458
1459    /// Checks whether this range is considered non-wrapping when the values are
1460    /// interpreted as *unsigned* numbers of width `size`.
1461    ///
1462    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1463    /// and `Err(..)` if the range is full so it depends how you think about it.
1464    #[inline]
1465    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1466        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
1467    }
1468
1469    /// Checks whether this range is considered non-wrapping when the values are
1470    /// interpreted as *signed* numbers of width `size`.
1471    ///
1472    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
1473    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
1474    ///
1475    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1476    /// and `Err(..)` if the range is full so it depends how you think about it.
1477    #[inline]
1478    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1479        if self.is_full_for(size) {
1480            Err(..)
1481        } else {
1482            let start: i128 = size.sign_extend(self.start);
1483            let end: i128 = size.sign_extend(self.end);
1484            Ok(start <= end)
1485        }
1486    }
1487}
1488
1489impl fmt::Debug for WrappingRange {
1490    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1491        if self.start > self.end {
1492            fmt.write_fmt(format_args!("(..={0}) | ({1}..)", self.end, self.start))write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1493        } else {
1494            fmt.write_fmt(format_args!("{0}..={1}", self.start, self.end))write!(fmt, "{}..={}", self.start, self.end)?;
1495        }
1496        Ok(())
1497    }
1498}
1499
1500/// Information about one scalar component of a Rust type.
1501#[derive(#[automatically_derived]
impl ::core::clone::Clone for Scalar {
    #[inline]
    fn clone(&self) -> Scalar {
        let _: ::core::clone::AssertParamIsClone<Primitive>;
        let _: ::core::clone::AssertParamIsClone<WrappingRange>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Scalar { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Scalar {
    #[inline]
    fn eq(&self, other: &Scalar) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Scalar::Initialized { value: __self_0, valid_range: __self_1
                    }, Scalar::Initialized {
                    value: __arg1_0, valid_range: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (Scalar::Union { value: __self_0 }, Scalar::Union {
                    value: __arg1_0 }) => __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Scalar {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Primitive>;
        let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Scalar {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            Scalar::Union { value: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Scalar {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "Initialized", "value", __self_0, "valid_range", &__self_1),
            Scalar::Union { value: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f, "Union",
                    "value", &__self_0),
        }
    }
}Debug)]
1502#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Scalar where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Scalar::Initialized {
                        value: ref __binding_0, valid_range: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    Scalar::Union { value: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1503pub enum Scalar {
1504    Initialized {
1505        value: Primitive,
1506
1507        // FIXME(eddyb) always use the shortest range, e.g., by finding
1508        // the largest space between two consecutive valid values and
1509        // taking everything else as the (shortest) valid range.
1510        valid_range: WrappingRange,
1511    },
1512    Union {
1513        /// Even for unions, we need to use the correct registers for the kind of
1514        /// values inside the union, so we keep the `Primitive` type around. We
1515        /// also use it to compute the size of the scalar.
1516        /// However, unions never have niches and even allow undef,
1517        /// so there is no `valid_range`.
1518        value: Primitive,
1519    },
1520}
1521
1522impl Scalar {
1523    #[inline]
1524    pub fn is_bool(&self) -> bool {
1525        use Integer::*;
1526        #[allow(non_exhaustive_omitted_patterns)] match self {
    Scalar::Initialized {
        value: Primitive::Int(I8, false),
        valid_range: WrappingRange { start: 0, end: 1 } } => true,
    _ => false,
}matches!(
1527            self,
1528            Scalar::Initialized {
1529                value: Primitive::Int(I8, false),
1530                valid_range: WrappingRange { start: 0, end: 1 }
1531            }
1532        )
1533    }
1534
1535    /// Get the primitive representation of this type, ignoring the valid range and whether the
1536    /// value is allowed to be undefined (due to being a union).
1537    pub fn primitive(&self) -> Primitive {
1538        match *self {
1539            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1540        }
1541    }
1542
1543    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
1544        self.primitive().align(cx)
1545    }
1546
1547    pub fn size(self, cx: &impl HasDataLayout) -> Size {
1548        self.primitive().size(cx)
1549    }
1550
1551    #[inline]
1552    pub fn to_union(&self) -> Self {
1553        Self::Union { value: self.primitive() }
1554    }
1555
1556    #[inline]
1557    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1558        match *self {
1559            Scalar::Initialized { valid_range, .. } => valid_range,
1560            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1561        }
1562    }
1563
1564    #[inline]
1565    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1566    /// union.
1567    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1568        match self {
1569            Scalar::Initialized { valid_range, .. } => valid_range,
1570            Scalar::Union { .. } => {
    ::core::panicking::panic_fmt(format_args!("cannot change the valid range of a union"));
}panic!("cannot change the valid range of a union"),
1571        }
1572    }
1573
1574    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
1575    /// layout.
1576    #[inline]
1577    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1578        match *self {
1579            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1580            Scalar::Union { .. } => true,
1581        }
1582    }
1583
1584    /// Returns `true` if this type can be left uninit.
1585    #[inline]
1586    pub fn is_uninit_valid(&self) -> bool {
1587        match *self {
1588            Scalar::Initialized { .. } => false,
1589            Scalar::Union { .. } => true,
1590        }
1591    }
1592
1593    /// Returns `true` if this is a signed integer scalar
1594    #[inline]
1595    pub fn is_signed(&self) -> bool {
1596        match self.primitive() {
1597            Primitive::Int(_, signed) => signed,
1598            _ => false,
1599        }
1600    }
1601}
1602
1603// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
1604/// Describes how the fields of a type are located in memory.
1605#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    FieldsShape<FieldIdx> {
    #[inline]
    fn eq(&self, other: &FieldsShape<FieldIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (FieldsShape::Union(__self_0), FieldsShape::Union(__arg1_0))
                    => __self_0 == __arg1_0,
                (FieldsShape::Array { stride: __self_0, count: __self_1 },
                    FieldsShape::Array { stride: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (FieldsShape::Arbitrary {
                    offsets: __self_0, in_memory_order: __self_1 },
                    FieldsShape::Arbitrary {
                    offsets: __arg1_0, in_memory_order: __arg1_1 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
    FieldsShape<FieldIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<NonZeroUsize>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<u64>;
        let _: ::core::cmp::AssertParamIsEq<IndexVec<FieldIdx, Size>>;
        let _: ::core::cmp::AssertParamIsEq<IndexVec<u32, FieldIdx>>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
    FieldsShape<FieldIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            FieldsShape::Union(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            FieldsShape::Array { stride: __self_0, count: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
    FieldsShape<FieldIdx> {
    #[inline]
    fn clone(&self) -> FieldsShape<FieldIdx> {
        match self {
            FieldsShape::Primitive => FieldsShape::Primitive,
            FieldsShape::Union(__self_0) =>
                FieldsShape::Union(::core::clone::Clone::clone(__self_0)),
            FieldsShape::Array { stride: __self_0, count: __self_1 } =>
                FieldsShape::Array {
                    stride: ::core::clone::Clone::clone(__self_0),
                    count: ::core::clone::Clone::clone(__self_1),
                },
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } =>
                FieldsShape::Arbitrary {
                    offsets: ::core::clone::Clone::clone(__self_0),
                    in_memory_order: ::core::clone::Clone::clone(__self_1),
                },
        }
    }
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
    FieldsShape<FieldIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            FieldsShape::Primitive =>
                ::core::fmt::Formatter::write_str(f, "Primitive"),
            FieldsShape::Union(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Union",
                    &__self_0),
            FieldsShape::Array { stride: __self_0, count: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f, "Array",
                    "stride", __self_0, "count", &__self_1),
            FieldsShape::Arbitrary {
                offsets: __self_0, in_memory_order: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "Arbitrary", "offsets", __self_0, "in_memory_order",
                    &__self_1),
        }
    }
}Debug)]
1606#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            FieldsShape<FieldIdx> where
            __CTX: ::rustc_span::HashStableContext,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    FieldsShape::Primitive => {}
                    FieldsShape::Union(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    FieldsShape::Array {
                        stride: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    FieldsShape::Arbitrary {
                        offsets: ref __binding_0, in_memory_order: ref __binding_1 }
                        => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1607pub enum FieldsShape<FieldIdx: Idx> {
1608    /// Scalar primitives and `!`, which never have fields.
1609    Primitive,
1610
1611    /// All fields start at no offset. The `usize` is the field count.
1612    Union(NonZeroUsize),
1613
1614    /// Array/vector-like placement, with all fields of identical types.
1615    Array { stride: Size, count: u64 },
1616
1617    /// Struct-like placement, with precomputed offsets.
1618    ///
1619    /// Fields are guaranteed to not overlap, but note that gaps
1620    /// before, between and after all the fields are NOT always
1621    /// padding, and as such their contents may not be discarded.
1622    /// For example, enum variants leave a gap at the start,
1623    /// where the discriminant field in the enum layout goes.
1624    Arbitrary {
1625        /// Offsets for the first byte of each field,
1626        /// ordered to match the source definition order.
1627        /// This vector does not go in increasing order.
1628        // FIXME(eddyb) use small vector optimization for the common case.
1629        offsets: IndexVec<FieldIdx, Size>,
1630
1631        /// Maps memory order field indices to source order indices,
1632        /// depending on how the fields were reordered (if at all).
1633        /// This is a permutation, with both the source order and the
1634        /// memory order using the same (0..n) index ranges.
1635        ///
1636        // FIXME(eddyb) build a better abstraction for permutations, if possible.
1637        // FIXME(camlorn) also consider small vector optimization here.
1638        in_memory_order: IndexVec<u32, FieldIdx>,
1639    },
1640}
1641
1642impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1643    #[inline]
1644    pub fn count(&self) -> usize {
1645        match *self {
1646            FieldsShape::Primitive => 0,
1647            FieldsShape::Union(count) => count.get(),
1648            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1649            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1650        }
1651    }
1652
1653    #[inline]
1654    pub fn offset(&self, i: usize) -> Size {
1655        match *self {
1656            FieldsShape::Primitive => {
1657                {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("FieldsShape::offset: `Primitive`s have no fields")));
}unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1658            }
1659            FieldsShape::Union(count) => {
1660                if !(i < count.get()) {
    {
        ::core::panicking::panic_fmt(format_args!("tried to access field {0} of union with {1} fields",
                i, count));
    }
};assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1661                Size::ZERO
1662            }
1663            FieldsShape::Array { stride, count } => {
1664                let i = u64::try_from(i).unwrap();
1665                if !(i < count) {
    {
        ::core::panicking::panic_fmt(format_args!("tried to access field {0} of array with {1} fields",
                i, count));
    }
};assert!(i < count, "tried to access field {i} of array with {count} fields");
1666                stride * i
1667            }
1668            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1669        }
1670    }
1671
1672    /// Gets source indices of the fields by increasing offsets.
1673    #[inline]
1674    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1675        // Primitives don't really have fields in the way that structs do,
1676        // but having this return an empty iterator for them is unhelpful
1677        // since that makes them look kinda like ZSTs, which they're not.
1678        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1679
1680        (0..pseudofield_count).map(move |i| match self {
1681            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1682            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
1683        })
1684    }
1685}
1686
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// LLVM's `0` address space.
    pub const ZERO: Self = AddressSpace(0);
}

/// How many scalable vectors are in a `BackendRepr::ScalableVector`?
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct NumScalableVectors(pub u8);

impl NumScalableVectors {
    /// Returns a `NumScalableVector` for a non-tuple scalable vector (e.g. a single vector).
    pub fn for_non_tuple() -> Self {
        NumScalableVectors(1)
    }

    // Returns `NumScalableVectors` for values of two through eight, which are a valid number of
    // fields for a tuple of scalable vectors to have. `1` is a valid value of `NumScalableVectors`
    // but not for a tuple which would have a field count.
    //
    // NOTE(review): the pattern `2..8` is an *exclusive* range, so `8` yields `None`, which
    // contradicts "two through eight" above — confirm whether the code or the comment is wrong.
    pub fn from_field_count(count: usize) -> Option<Self> {
        match count {
            2..8 => Some(NumScalableVectors(count as u8)),
            _ => None,
        }
    }
}

/// The way we represent values to the backend
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, this implies little about that, but is mostly used to describe the syntactic form
/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
/// how the value will be lowered to the calling convention, in itself.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// and larger values will usually prefer to be represented as memory.
1731#[derive(#[automatically_derived]
impl ::core::clone::Clone for BackendRepr {
    #[inline]
    fn clone(&self) -> BackendRepr {
        let _: ::core::clone::AssertParamIsClone<Scalar>;
        let _: ::core::clone::AssertParamIsClone<u64>;
        let _: ::core::clone::AssertParamIsClone<NumScalableVectors>;
        let _: ::core::clone::AssertParamIsClone<bool>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for BackendRepr { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for BackendRepr {
    #[inline]
    fn eq(&self, other: &BackendRepr) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (BackendRepr::Scalar(__self_0), BackendRepr::Scalar(__arg1_0))
                    => __self_0 == __arg1_0,
                (BackendRepr::ScalarPair(__self_0, __self_1),
                    BackendRepr::ScalarPair(__arg1_0, __arg1_1)) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1,
                (BackendRepr::SimdScalableVector {
                    element: __self_0,
                    count: __self_1,
                    number_of_vectors: __self_2 },
                    BackendRepr::SimdScalableVector {
                    element: __arg1_0,
                    count: __arg1_1,
                    number_of_vectors: __arg1_2 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0 &&
                        __self_2 == __arg1_2,
                (BackendRepr::SimdVector { element: __self_0, count: __self_1
                    }, BackendRepr::SimdVector {
                    element: __arg1_0, count: __arg1_1 }) =>
                    __self_1 == __arg1_1 && __self_0 == __arg1_0,
                (BackendRepr::Memory { sized: __self_0 },
                    BackendRepr::Memory { sized: __arg1_0 }) =>
                    __self_0 == __arg1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for BackendRepr {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Scalar>;
        let _: ::core::cmp::AssertParamIsEq<u64>;
        let _: ::core::cmp::AssertParamIsEq<NumScalableVectors>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for BackendRepr {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            BackendRepr::Scalar(__self_0) =>
                ::core::hash::Hash::hash(__self_0, state),
            BackendRepr::ScalarPair(__self_0, __self_1) => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::SimdScalableVector {
                element: __self_0,
                count: __self_1,
                number_of_vectors: __self_2 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state)
            }
            BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
                {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state)
            }
            BackendRepr::Memory { sized: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
        }
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for BackendRepr {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            BackendRepr::Scalar(__self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Scalar",
                    &__self_0),
            BackendRepr::ScalarPair(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f,
                    "ScalarPair", __self_0, &__self_1),
            BackendRepr::SimdScalableVector {
                element: __self_0,
                count: __self_1,
                number_of_vectors: __self_2 } =>
                ::core::fmt::Formatter::debug_struct_field3_finish(f,
                    "SimdScalableVector", "element", __self_0, "count",
                    __self_1, "number_of_vectors", &__self_2),
            BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f,
                    "SimdVector", "element", __self_0, "count", &__self_1),
            BackendRepr::Memory { sized: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Memory", "sized", &__self_0),
        }
    }
}Debug)]
1732#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for BackendRepr where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    BackendRepr::Scalar(ref __binding_0) => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::ScalarPair(ref __binding_0, ref __binding_1) =>
                        {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::SimdScalableVector {
                        element: ref __binding_0,
                        count: ref __binding_1,
                        number_of_vectors: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::SimdVector {
                        element: ref __binding_0, count: ref __binding_1 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                    }
                    BackendRepr::Memory { sized: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1733pub enum BackendRepr {
1734    Scalar(Scalar),
1735    ScalarPair(Scalar, Scalar),
1736    SimdScalableVector {
1737        element: Scalar,
1738        count: u64,
1739        number_of_vectors: NumScalableVectors,
1740    },
1741    SimdVector {
1742        element: Scalar,
1743        count: u64,
1744    },
1745    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
1746    Memory {
1747        /// If true, the size is exact, otherwise it's only a lower bound.
1748        sized: bool,
1749    },
1750}
1751
1752impl BackendRepr {
1753    /// Returns `true` if the layout corresponds to an unsized type.
1754    #[inline]
1755    pub fn is_unsized(&self) -> bool {
1756        match *self {
1757            BackendRepr::Scalar(_)
1758            | BackendRepr::ScalarPair(..)
1759            // FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
1760            // `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
1761            // fully implemented, scalable vectors will remain `Sized`, they just won't be
1762            // `const Sized` - whether `is_unsized` continues to return `false` at that point will
1763            // need to be revisited and will depend on what `is_unsized` is used for.
1764            | BackendRepr::SimdScalableVector { .. }
1765            | BackendRepr::SimdVector { .. } => false,
1766            BackendRepr::Memory { sized } => !sized,
1767        }
1768    }
1769
1770    #[inline]
1771    pub fn is_sized(&self) -> bool {
1772        !self.is_unsized()
1773    }
1774
1775    /// Returns `true` if this is a single signed integer scalar.
1776    /// Sanity check: panics if this is not a scalar type (see PR #70189).
1777    #[inline]
1778    pub fn is_signed(&self) -> bool {
1779        match self {
1780            BackendRepr::Scalar(scal) => scal.is_signed(),
1781            _ => {
    ::core::panicking::panic_fmt(format_args!("`is_signed` on non-scalar ABI {0:?}",
            self));
}panic!("`is_signed` on non-scalar ABI {self:?}"),
1782        }
1783    }
1784
1785    /// Returns `true` if this is a scalar type
1786    #[inline]
1787    pub fn is_scalar(&self) -> bool {
1788        #[allow(non_exhaustive_omitted_patterns)] match *self {
    BackendRepr::Scalar(_) => true,
    _ => false,
}matches!(*self, BackendRepr::Scalar(_))
1789    }
1790
1791    /// Returns `true` if this is a bool
1792    #[inline]
1793    pub fn is_bool(&self) -> bool {
1794        #[allow(non_exhaustive_omitted_patterns)] match *self {
    BackendRepr::Scalar(s) if s.is_bool() => true,
    _ => false,
}matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1795    }
1796
1797    /// The psABI alignment for a `Scalar` or `ScalarPair`
1798    ///
1799    /// `None` for other variants.
1800    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1801        match *self {
1802            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1803            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1804            // The align of a Vector can vary in surprising ways
1805            BackendRepr::SimdVector { .. }
1806            | BackendRepr::Memory { .. }
1807            | BackendRepr::SimdScalableVector { .. } => None,
1808        }
1809    }
1810
1811    /// The psABI size for a `Scalar` or `ScalarPair`
1812    ///
1813    /// `None` for other variants
1814    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1815        match *self {
1816            // No padding in scalars.
1817            BackendRepr::Scalar(s) => Some(s.size(cx)),
1818            // May have some padding between the pair.
1819            BackendRepr::ScalarPair(s1, s2) => {
1820                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1821                let size = (field2_offset + s2.size(cx)).align_to(
1822                    self.scalar_align(cx)
1823                        // We absolutely must have an answer here or everything is FUBAR.
1824                        .unwrap(),
1825                );
1826                Some(size)
1827            }
1828            // The size of a Vector can vary in surprising ways
1829            BackendRepr::SimdVector { .. }
1830            | BackendRepr::Memory { .. }
1831            | BackendRepr::SimdScalableVector { .. } => None,
1832        }
1833    }
1834
1835    /// Discard validity range information and allow undef.
1836    pub fn to_union(&self) -> Self {
1837        match *self {
1838            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
1839            BackendRepr::ScalarPair(s1, s2) => {
1840                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
1841            }
1842            BackendRepr::SimdVector { element, count } => {
1843                BackendRepr::SimdVector { element: element.to_union(), count }
1844            }
1845            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
1846            BackendRepr::SimdScalableVector { element, count, number_of_vectors } => {
1847                BackendRepr::SimdScalableVector {
1848                    element: element.to_union(),
1849                    count,
1850                    number_of_vectors,
1851                }
1852            }
1853        }
1854    }
1855
1856    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
1857        match (self, other) {
1858            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
1859            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
1860            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
1861            (
1862                BackendRepr::SimdVector { element: element_l, count: count_l },
1863                BackendRepr::SimdVector { element: element_r, count: count_r },
1864            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
1865            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
1866                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
1867            }
1868            // Everything else must be strictly identical.
1869            _ => self == other,
1870        }
1871    }
1872}
1873
1874// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
1875#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
    VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn eq(&self, other: &Variants<FieldIdx, VariantIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (Variants::Single { index: __self_0 }, Variants::Single {
                    index: __arg1_0 }) => __self_0 == __arg1_0,
                (Variants::Multiple {
                    tag: __self_0,
                    tag_encoding: __self_1,
                    tag_field: __self_2,
                    variants: __self_3 }, Variants::Multiple {
                    tag: __arg1_0,
                    tag_encoding: __arg1_1,
                    tag_field: __arg1_2,
                    variants: __arg1_3 }) =>
                    __self_0 == __arg1_0 && __self_1 == __arg1_1 &&
                            __self_2 == __arg1_2 && __self_3 == __arg1_3,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
    ::core::cmp::Eq for Variants<FieldIdx, VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
        let _: ::core::cmp::AssertParamIsEq<Scalar>;
        let _: ::core::cmp::AssertParamIsEq<TagEncoding<VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<FieldIdx>;
        let _:
                ::core::cmp::AssertParamIsEq<IndexVec<VariantIdx,
                LayoutData<FieldIdx, VariantIdx>>>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
    ::core::hash::Hash for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            Variants::Single { index: __self_0 } =>
                ::core::hash::Hash::hash(__self_0, state),
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state);
                ::core::hash::Hash::hash(__self_3, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
    Idx> ::core::clone::Clone for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn clone(&self) -> Variants<FieldIdx, VariantIdx> {
        match self {
            Variants::Empty => Variants::Empty,
            Variants::Single { index: __self_0 } =>
                Variants::Single {
                    index: ::core::clone::Clone::clone(__self_0),
                },
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } =>
                Variants::Multiple {
                    tag: ::core::clone::Clone::clone(__self_0),
                    tag_encoding: ::core::clone::Clone::clone(__self_1),
                    tag_field: ::core::clone::Clone::clone(__self_2),
                    variants: ::core::clone::Clone::clone(__self_3),
                },
        }
    }
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx, VariantIdx: ::core::fmt::Debug + Idx>
    ::core::fmt::Debug for Variants<FieldIdx, VariantIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            Variants::Empty => ::core::fmt::Formatter::write_str(f, "Empty"),
            Variants::Single { index: __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Single", "index", &__self_0),
            Variants::Multiple {
                tag: __self_0,
                tag_encoding: __self_1,
                tag_field: __self_2,
                variants: __self_3 } =>
                ::core::fmt::Formatter::debug_struct_field4_finish(f,
                    "Multiple", "tag", __self_0, "tag_encoding", __self_1,
                    "tag_field", __self_2, "variants", &__self_3),
        }
    }
}Debug)]
1876#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            Variants<FieldIdx, VariantIdx> where
            __CTX: ::rustc_span::HashStableContext,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Variants::Empty => {}
                    Variants::Single { index: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                    Variants::Multiple {
                        tag: ref __binding_0,
                        tag_encoding: ref __binding_1,
                        tag_field: ref __binding_2,
                        variants: ref __binding_3 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1877pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
1878    /// A type with no valid variants. Must be uninhabited.
1879    Empty,
1880
1881    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
1882    Single {
1883        /// Always `0` for types that cannot have multiple variants.
1884        index: VariantIdx,
1885    },
1886
1887    /// Enum-likes with more than one variant: each variant comes with
1888    /// a *discriminant* (usually the same as the variant index but the user can
1889    /// assign explicit discriminant values). That discriminant is encoded
1890    /// as a *tag* on the machine. The layout of each variant is
1891    /// a struct, and they all have space reserved for the tag.
1892    /// For enums, the tag is the sole field of the layout.
1893    Multiple {
1894        tag: Scalar,
1895        tag_encoding: TagEncoding<VariantIdx>,
1896        tag_field: FieldIdx,
1897        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
1898    },
1899}
1900
1901// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
1902#[derive(#[automatically_derived]
impl<VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    TagEncoding<VariantIdx> {
    #[inline]
    fn eq(&self, other: &TagEncoding<VariantIdx>) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr &&
            match (self, other) {
                (TagEncoding::Niche {
                    untagged_variant: __self_0,
                    niche_variants: __self_1,
                    niche_start: __self_2 }, TagEncoding::Niche {
                    untagged_variant: __arg1_0,
                    niche_variants: __arg1_1,
                    niche_start: __arg1_2 }) =>
                    __self_2 == __arg1_2 && __self_0 == __arg1_0 &&
                        __self_1 == __arg1_1,
                _ => true,
            }
    }
}PartialEq, #[automatically_derived]
impl<VariantIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
    TagEncoding<VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
        let _: ::core::cmp::AssertParamIsEq<RangeInclusive<VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<u128>;
    }
}Eq, #[automatically_derived]
impl<VariantIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
    TagEncoding<VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        ::core::hash::Hash::hash(&__self_discr, state);
        match self {
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } => {
                ::core::hash::Hash::hash(__self_0, state);
                ::core::hash::Hash::hash(__self_1, state);
                ::core::hash::Hash::hash(__self_2, state)
            }
            _ => {}
        }
    }
}Hash, #[automatically_derived]
impl<VariantIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
    TagEncoding<VariantIdx> {
    #[inline]
    fn clone(&self) -> TagEncoding<VariantIdx> {
        match self {
            TagEncoding::Direct => TagEncoding::Direct,
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } =>
                TagEncoding::Niche {
                    untagged_variant: ::core::clone::Clone::clone(__self_0),
                    niche_variants: ::core::clone::Clone::clone(__self_1),
                    niche_start: ::core::clone::Clone::clone(__self_2),
                },
        }
    }
}Clone, #[automatically_derived]
impl<VariantIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
    TagEncoding<VariantIdx> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            TagEncoding::Direct =>
                ::core::fmt::Formatter::write_str(f, "Direct"),
            TagEncoding::Niche {
                untagged_variant: __self_0,
                niche_variants: __self_1,
                niche_start: __self_2 } =>
                ::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
                    "untagged_variant", __self_0, "niche_variants", __self_1,
                    "niche_start", &__self_2),
        }
    }
}Debug)]
1903#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            TagEncoding<VariantIdx> where
            __CTX: ::rustc_span::HashStableContext,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    TagEncoding::Direct => {}
                    TagEncoding::Niche {
                        untagged_variant: ref __binding_0,
                        niche_variants: ref __binding_1,
                        niche_start: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1904pub enum TagEncoding<VariantIdx: Idx> {
1905    /// The tag directly stores the discriminant, but possibly with a smaller layout
1906    /// (so converting the tag to the discriminant can require sign extension).
1907    Direct,
1908
1909    /// Niche (values invalid for a type) encoding the discriminant.
1910    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
1911    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
1912    ///
1913    /// The variant `untagged_variant` contains a niche at an arbitrary
1914    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
1915    /// For a variant with variant index `i`, such that `i != untagged_variant`,
1916    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
1917    /// (this is wrapping arithmetic using the type of the niche field, cf. the
1918    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
1919    /// query implementation).
1920    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
1921    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
1922    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
1923    ///
1924    /// For example, `Option<(usize, &T)>`  is represented such that the tag for
1925    /// `None` is the null pointer in the second tuple field, and
1926    /// `Some` is the identity function (with a non-null reference)
1927    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.
1928    ///
1929    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
1930    /// range cannot be represented; they must be uninhabited.
1931    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
1932    Niche {
1933        untagged_variant: VariantIdx,
1934        /// This range *may* contain `untagged_variant` or uninhabited variants;
1935        /// these are then just "dead values" and not used to encode anything.
1936        niche_variants: RangeInclusive<VariantIdx>,
1937        /// This is inbounds of the type of the niche field
1938        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
1939        niche_start: u128,
1940    },
1941}
1942
1943#[derive(#[automatically_derived]
impl ::core::clone::Clone for Niche {
    #[inline]
    fn clone(&self) -> Niche {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Primitive>;
        let _: ::core::clone::AssertParamIsClone<WrappingRange>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Niche { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Niche {
    #[inline]
    fn eq(&self, other: &Niche) -> bool {
        self.offset == other.offset && self.value == other.value &&
            self.valid_range == other.valid_range
    }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Niche {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Primitive>;
        let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
    }
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Niche {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.offset, state);
        ::core::hash::Hash::hash(&self.value, state);
        ::core::hash::Hash::hash(&self.valid_range, state)
    }
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Niche {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
            "offset", &self.offset, "value", &self.value, "valid_range",
            &&self.valid_range)
    }
}Debug)]
1944#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__CTX> ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            for Niche where __CTX: ::rustc_span::HashStableContext {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Niche {
                        offset: ref __binding_0,
                        value: ref __binding_1,
                        valid_range: ref __binding_2 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
1945pub struct Niche {
1946    pub offset: Size,
1947    pub value: Primitive,
1948    pub valid_range: WrappingRange,
1949}
1950
1951impl Niche {
1952    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
1953        let Scalar::Initialized { value, valid_range } = scalar else { return None };
1954        let niche = Niche { offset, value, valid_range };
1955        if niche.available(cx) > 0 { Some(niche) } else { None }
1956    }
1957
1958    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
1959        let Self { value, valid_range: v, .. } = *self;
1960        let size = value.size(cx);
1961        if !(size.bits() <= 128) {
    ::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
1962        let max_value = size.unsigned_int_max();
1963
1964        // Find out how many values are outside the valid range.
1965        let niche = v.end.wrapping_add(1)..v.start;
1966        niche.end.wrapping_sub(niche.start) & max_value
1967    }
1968
1969    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
1970        if !(count > 0) { ::core::panicking::panic("assertion failed: count > 0") };assert!(count > 0);
1971
1972        let Self { value, valid_range: v, .. } = *self;
1973        let size = value.size(cx);
1974        if !(size.bits() <= 128) {
    ::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
1975        let max_value = size.unsigned_int_max();
1976
1977        let niche = v.end.wrapping_add(1)..v.start;
1978        let available = niche.end.wrapping_sub(niche.start) & max_value;
1979        if count > available {
1980            return None;
1981        }
1982
1983        // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
1984        // bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
1985        // the niche of zero. This is accomplished by preferring enums with 2 variants(`count==1`)
1986        // and always taking the shortest path to niche zero. Having `None` in niche zero can
1987        // enable some special optimizations.
1988        //
1989        // Bound selection criteria:
1990        // 1. Select closest to zero given wrapping semantics.
1991        // 2. Avoid moving past zero if possible.
1992        //
1993        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
1994        // since they have to fit perfectly. If niche zero is already reserved, the selection of
1995        // bounds are of little interest.
1996        let move_start = |v: WrappingRange| {
1997            let start = v.start.wrapping_sub(count) & max_value;
1998            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
1999        };
2000        let move_end = |v: WrappingRange| {
2001            let start = v.end.wrapping_add(1) & max_value;
2002            let end = v.end.wrapping_add(count) & max_value;
2003            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
2004        };
2005        let distance_end_zero = max_value - v.end;
2006        if v.start > v.end {
2007            // zero is unavailable because wrapping occurs
2008            move_end(v)
2009        } else if v.start <= distance_end_zero {
2010            if count <= v.start {
2011                move_start(v)
2012            } else {
2013                // moved past zero, use other bound
2014                move_end(v)
2015            }
2016        } else {
2017            let end = v.end.wrapping_add(count) & max_value;
2018            let overshot_zero = (1..=v.end).contains(&end);
2019            if overshot_zero {
2020                // moved past zero, use other bound
2021                move_start(v)
2022            } else {
2023                move_end(v)
2024            }
2025        }
2026    }
2027}
2028
2029// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
2030#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
    VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
    LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn eq(&self, other: &LayoutData<FieldIdx, VariantIdx>) -> bool {
        self.uninhabited == other.uninhabited && self.fields == other.fields
                                        && self.variants == other.variants &&
                                    self.backend_repr == other.backend_repr &&
                                self.largest_niche == other.largest_niche &&
                            self.align == other.align && self.size == other.size &&
                    self.max_repr_align == other.max_repr_align &&
                self.unadjusted_abi_align == other.unadjusted_abi_align &&
            self.randomization_seed == other.randomization_seed
    }
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
    ::core::cmp::Eq for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<FieldsShape<FieldIdx>>;
        let _: ::core::cmp::AssertParamIsEq<Variants<FieldIdx, VariantIdx>>;
        let _: ::core::cmp::AssertParamIsEq<BackendRepr>;
        let _: ::core::cmp::AssertParamIsEq<Option<Niche>>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<AbiAlign>;
        let _: ::core::cmp::AssertParamIsEq<Size>;
        let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
        let _: ::core::cmp::AssertParamIsEq<Align>;
        let _: ::core::cmp::AssertParamIsEq<Hash64>;
    }
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
    ::core::hash::Hash for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.fields, state);
        ::core::hash::Hash::hash(&self.variants, state);
        ::core::hash::Hash::hash(&self.backend_repr, state);
        ::core::hash::Hash::hash(&self.largest_niche, state);
        ::core::hash::Hash::hash(&self.uninhabited, state);
        ::core::hash::Hash::hash(&self.align, state);
        ::core::hash::Hash::hash(&self.size, state);
        ::core::hash::Hash::hash(&self.max_repr_align, state);
        ::core::hash::Hash::hash(&self.unadjusted_abi_align, state);
        ::core::hash::Hash::hash(&self.randomization_seed, state)
    }
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
    Idx> ::core::clone::Clone for LayoutData<FieldIdx, VariantIdx> {
    #[inline]
    fn clone(&self) -> LayoutData<FieldIdx, VariantIdx> {
        LayoutData {
            fields: ::core::clone::Clone::clone(&self.fields),
            variants: ::core::clone::Clone::clone(&self.variants),
            backend_repr: ::core::clone::Clone::clone(&self.backend_repr),
            largest_niche: ::core::clone::Clone::clone(&self.largest_niche),
            uninhabited: ::core::clone::Clone::clone(&self.uninhabited),
            align: ::core::clone::Clone::clone(&self.align),
            size: ::core::clone::Clone::clone(&self.size),
            max_repr_align: ::core::clone::Clone::clone(&self.max_repr_align),
            unadjusted_abi_align: ::core::clone::Clone::clone(&self.unadjusted_abi_align),
            randomization_seed: ::core::clone::Clone::clone(&self.randomization_seed),
        }
    }
}Clone)]
2031#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<FieldIdx: Idx, VariantIdx: Idx, __CTX>
            ::rustc_data_structures::stable_hasher::HashStable<__CTX> for
            LayoutData<FieldIdx, VariantIdx> where
            __CTX: ::rustc_span::HashStableContext,
            FieldIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>,
            VariantIdx: ::rustc_data_structures::stable_hasher::HashStable<__CTX>
            {
            #[inline]
            fn hash_stable(&self, __hcx: &mut __CTX,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    LayoutData {
                        fields: ref __binding_0,
                        variants: ref __binding_1,
                        backend_repr: ref __binding_2,
                        largest_niche: ref __binding_3,
                        uninhabited: ref __binding_4,
                        align: ref __binding_5,
                        size: ref __binding_6,
                        max_repr_align: ref __binding_7,
                        unadjusted_abi_align: ref __binding_8,
                        randomization_seed: ref __binding_9 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                        { __binding_1.hash_stable(__hcx, __hasher); }
                        { __binding_2.hash_stable(__hcx, __hasher); }
                        { __binding_3.hash_stable(__hcx, __hasher); }
                        { __binding_4.hash_stable(__hcx, __hasher); }
                        { __binding_5.hash_stable(__hcx, __hasher); }
                        { __binding_6.hash_stable(__hcx, __hasher); }
                        { __binding_7.hash_stable(__hcx, __hasher); }
                        { __binding_8.hash_stable(__hcx, __hasher); }
                        { __binding_9.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable_Generic))]
2032pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
2033    /// Says where the fields are located within the layout.
2034    pub fields: FieldsShape<FieldIdx>,
2035
2036    /// Encodes information about multi-variant layouts.
2037    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
2038    /// shared between all variants. One of them will be the discriminant,
2039    /// but e.g. coroutines can have more.
2040    ///
2041    /// To access all fields of this layout, both `fields` and the fields of the active variant
2042    /// must be taken into account.
2043    pub variants: Variants<FieldIdx, VariantIdx>,
2044
2045    /// The `backend_repr` defines how this data will be represented to the codegen backend,
2046    /// and encodes value restrictions via `valid_range`.
2047    ///
2048    /// Note that this is entirely orthogonal to the recursive structure defined by
2049    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
2050    /// `IrForm::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
2051    /// have to be taken into account to find all fields of this layout.
2052    pub backend_repr: BackendRepr,
2053
2054    /// The leaf scalar with the largest number of invalid values
2055    /// (i.e. outside of its `valid_range`), if it exists.
2056    pub largest_niche: Option<Niche>,
2057    /// Is this type known to be uninhabted?
2058    ///
2059    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
2060    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
2061    pub uninhabited: bool,
2062
2063    pub align: AbiAlign,
2064    pub size: Size,
2065
2066    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
2067    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
2068    /// requested, even if the requested alignment is equal to the natural alignment.
2069    pub max_repr_align: Option<Align>,
2070
2071    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
2072    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
2073    /// in some cases.
2074    pub unadjusted_abi_align: Align,
2075
2076    /// The randomization seed based on this type's own repr and its fields.
2077    ///
2078    /// Since randomization is toggled on a per-crate basis even crates that do not have randomization
2079    /// enabled should still calculate a seed so that downstream uses can use it to distinguish different
2080    /// types.
2081    ///
2082    /// For every T and U for which we do not guarantee that a repr(Rust) `Foo<T>` can be coerced or
2083    /// transmuted to `Foo<U>` we aim to create probalistically distinct seeds so that Foo can choose
2084    /// to reorder its fields based on that information. The current implementation is a conservative
2085    /// approximation of this goal.
2086    pub randomization_seed: Hash64,
2087}
2088
2089impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2090    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
2091    pub fn is_aggregate(&self) -> bool {
2092        match self.backend_repr {
2093            BackendRepr::Scalar(_)
2094            | BackendRepr::SimdVector { .. }
2095            | BackendRepr::SimdScalableVector { .. } => false,
2096            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
2097        }
2098    }
2099
2100    /// Returns `true` if this is an uninhabited type
2101    pub fn is_uninhabited(&self) -> bool {
2102        self.uninhabited
2103    }
2104}
2105
2106impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
2107where
2108    FieldsShape<FieldIdx>: fmt::Debug,
2109    Variants<FieldIdx, VariantIdx>: fmt::Debug,
2110{
2111    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2112        // This is how `Layout` used to print before it become
2113        // `Interned<LayoutData>`. We print it like this to avoid having to update
2114        // expected output in a lot of tests.
2115        let LayoutData {
2116            size,
2117            align,
2118            backend_repr,
2119            fields,
2120            largest_niche,
2121            uninhabited,
2122            variants,
2123            max_repr_align,
2124            unadjusted_abi_align,
2125            randomization_seed,
2126        } = self;
2127        f.debug_struct("Layout")
2128            .field("size", size)
2129            .field("align", align)
2130            .field("backend_repr", backend_repr)
2131            .field("fields", fields)
2132            .field("largest_niche", largest_niche)
2133            .field("uninhabited", uninhabited)
2134            .field("variants", variants)
2135            .field("max_repr_align", max_repr_align)
2136            .field("unadjusted_abi_align", unadjusted_abi_align)
2137            .field("randomization_seed", randomization_seed)
2138            .finish()
2139    }
2140}
2141
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}
2152
2153/// Encodes extra information we have about a pointer.
2154///
2155/// Note that this information is advisory only, and backends are free to ignore it:
2156/// if the information is wrong, that can cause UB, but if the information is absent,
2157/// that must always be okay.
2158#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointeeInfo { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointeeInfo {
    #[inline]
    fn clone(&self) -> PointeeInfo {
        let _: ::core::clone::AssertParamIsClone<Option<PointerKind>>;
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointeeInfo {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(f, "PointeeInfo",
            "safe", &self.safe, "size", &self.size, "align", &&self.align)
    }
}Debug)]
2159pub struct PointeeInfo {
2160    /// If this is `None`, then this is a raw pointer.
2161    pub safe: Option<PointerKind>,
2162    /// If `size` is not zero, then the pointer is either null or dereferenceable for this many bytes
2163    /// (independent of `safe`).
2164    ///
2165    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
2166    /// of this function call", i.e. it is UB for the memory that this pointer points to be freed
2167    /// while this function is still running.
2168    pub size: Size,
2169    /// The pointer is guaranteed to be aligned this much (independent of `safe`).
2170    pub align: Align,
2171}
2172
2173impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2174    /// Returns `true` if the layout corresponds to an unsized type.
2175    #[inline]
2176    pub fn is_unsized(&self) -> bool {
2177        self.backend_repr.is_unsized()
2178    }
2179
2180    #[inline]
2181    pub fn is_sized(&self) -> bool {
2182        self.backend_repr.is_sized()
2183    }
2184
2185    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
2186    pub fn is_1zst(&self) -> bool {
2187        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
2188    }
2189
2190    /// Returns `true` if the size of the type is only known at runtime.
2191    pub fn is_scalable_vector(&self) -> bool {
2192        #[allow(non_exhaustive_omitted_patterns)] match self.backend_repr {
    BackendRepr::SimdScalableVector { .. } => true,
    _ => false,
}matches!(self.backend_repr, BackendRepr::SimdScalableVector { .. })
2193    }
2194
2195    /// Returns the elements count of a scalable vector.
2196    pub fn scalable_vector_element_count(&self) -> Option<u64> {
2197        match self.backend_repr {
2198            BackendRepr::SimdScalableVector { count, .. } => Some(count),
2199            _ => None,
2200        }
2201    }
2202
2203    /// Returns `true` if the type is a ZST and not unsized.
2204    ///
2205    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
2206    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
2207    pub fn is_zst(&self) -> bool {
2208        match self.backend_repr {
2209            BackendRepr::Scalar(_)
2210            | BackendRepr::ScalarPair(..)
2211            | BackendRepr::SimdScalableVector { .. }
2212            | BackendRepr::SimdVector { .. } => false,
2213            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
2214        }
2215    }
2216
2217    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
2218    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
2219    /// `Layout`; the `PassMode` need to be compared as well. Also note that we assume
2220    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
2221    /// checks would otherwise be required.
2222    pub fn eq_abi(&self, other: &Self) -> bool {
2223        // The one thing that we are not capturing here is that for unsized types, the metadata must
2224        // also have the same ABI, and moreover that the same metadata leads to the same size. The
2225        // 2nd point is quite hard to check though.
2226        self.size == other.size
2227            && self.is_sized() == other.is_sized()
2228            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
2229            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
2230            && self.align.abi == other.align.abi
2231            && self.max_repr_align == other.max_repr_align
2232            && self.unadjusted_abi_align == other.unadjusted_abi_align
2233    }
2234}
2235
2236#[derive(#[automatically_derived]
impl ::core::marker::Copy for StructKind { }Copy, #[automatically_derived]
impl ::core::clone::Clone for StructKind {
    #[inline]
    fn clone(&self) -> StructKind {
        let _: ::core::clone::AssertParamIsClone<Size>;
        let _: ::core::clone::AssertParamIsClone<Align>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for StructKind {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            StructKind::AlwaysSized =>
                ::core::fmt::Formatter::write_str(f, "AlwaysSized"),
            StructKind::MaybeUnsized =>
                ::core::fmt::Formatter::write_str(f, "MaybeUnsized"),
            StructKind::Prefixed(__self_0, __self_1) =>
                ::core::fmt::Formatter::debug_tuple_field2_finish(f,
                    "Prefixed", __self_0, &__self_1),
        }
    }
}Debug)]
2237pub enum StructKind {
2238    /// A tuple, closure, or univariant which cannot be coerced to unsized.
2239    AlwaysSized,
2240    /// A univariant, the last field of which may be coerced to unsized.
2241    MaybeUnsized,
2242    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
2243    Prefixed(Size, Align),
2244}
2245
/// Error type returned when parsing an ABI name from a string fails.
#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}