rustc_abi/lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.
You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of:
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`,
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

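// Illustrative sketch (not part of this crate): the two senses of "the Rust ABI is
// unstable" listed in the module docs above, as seen from a hypothetical downstream crate.
//
//     struct Foo(u8, u32);                 // default `repr(Rust)`: layout is mostly unspecified
//     extern "Rust" fn f(_: Foo) {}        // unspecified calling convention
//     extern "C" fn g(x: u32) -> u32 { x } // the platform's C calling convention
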
use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
#[cfg(feature = "nightly")]
use rustc_error_messages::{DiagArgValue, IntoDiagArg};
#[cfg(feature = "nightly")]
use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg};
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable};
#[cfg(feature = "nightly")]
use rustc_span::{Symbol, sym};

mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
#[cfg(feature = "nightly")]
pub use extern_abi::CVariadicStatus;
pub use extern_abi::{ExternAbi, all_names};
pub use layout::{FIRST_VARIANT, FieldIdx, LayoutCalculator, LayoutCalculatorError, VariantIdx};
#[cfg(feature = "nightly")]
pub use layout::{Layout, TyAbiInterface, TyAndLayout};

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, HashStable))]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        /// Internal only for now. If true, don't reorder fields.
        /// On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        /// If true, the type's crate has opted into layout randomization.
        /// Other flags can still inhibit reordering and thus randomization.
        /// The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        /// If true, the type is always passed indirectly by non-Rustic ABIs.
        /// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
        const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;
        const IS_SCALABLE        = 1 << 6;
        // Any of these flags being set prevents the field reordering optimisation.
        const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_SCALABLE.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug`, but without the
// `rustc_data_structures` dependency so that it builds on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

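// Illustrative sketch (not part of this crate): how the composite flags above combine,
// using only the `bitflags`-generated API and the `Debug` impl just defined.
//
//     let flags = ReprFlags::IS_C | ReprFlags::IS_LINEAR;
//     assert!(flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE)); // either flag inhibits reordering
//     assert!(flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE));         // IS_C alone is enough here
//     assert_eq!(format!("{flags:?}"), "IS_C | IS_LINEAR");            // via bitflags::parser::to_writer
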
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, HashStable))]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

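// Illustrative sketch (not part of this crate): the values this enum takes for a few
// common `#[repr(..)]` integer arguments, using `Integer::I8`/`I32` from this crate.
//
//     let as_u8 = IntegerType::Fixed(Integer::I8, false);   // #[repr(u8)]
//     let as_i32 = IntegerType::Fixed(Integer::I32, true);  // #[repr(i32)]
//     let as_isize = IntegerType::Pointer(true);            // #[repr(isize)]
//     assert!(!as_u8.is_signed() && as_i32.is_signed() && as_isize.is_signed());
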
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, HashStable))]
pub enum ScalableElt {
    /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector
    ElementCount(u16),
    /// `rustc_scalable_vector` without `N`, used for tuple types of scalable vectors that only
    /// contain other scalable vectors
    Container,
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, HashStable))]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// `#[rustc_scalable_vector]`
    pub scalable: Option<ScalableElt>,
    /// The seed to be used for randomizing a type's layout
    ///
    /// Note: This could technically be a `u128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn scalable(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SCALABLE)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    ///
    /// This is the "typeck type" of the discriminant, which is effectively the maximum size:
    /// discriminant values will be wrapped to fit (with a lint). Layout can later decide to use a
    /// smaller type for the tag that stores the discriminant at runtime and that will work just
    /// fine; it just induces casts when getting/setting the discriminant.
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}

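// Illustrative sketch (not part of this crate): how a `#[repr(C, u8)]` enum would be
// reflected here, assuming the caller has already parsed the attribute into these fields.
//
//     let repr = ReprOptions {
//         int: Some(IntegerType::Fixed(Integer::I8, false)),
//         flags: ReprFlags::IS_C,
//         ..Default::default()
//     };
//     assert_eq!(repr.discr_type(), IntegerType::Fixed(Integer::I8, false));
//     assert!(repr.inhibit_enum_layout_opt());          // repr(C) or an explicit int inhibits it
//     assert!(repr.inhibit_struct_field_reordering());  // field order is now guaranteed
//     assert!(!repr.can_randomize_type_layout());
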
/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

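// Worked out (not part of this crate): a 4-bit field holds log2 values 0..=15, so the
// largest representable lane count is 2^15.
//
//     assert_eq!(MAX_SIMD_LANES, 1 << 0xF);
//     assert_eq!(MAX_SIMD_LANES, 32_768);
//     assert_eq!(MAX_SIMD_LANES, 2u64.pow(0b1111)); // largest exponent a 4-bit log2 can store
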
/// How pointers are represented in a given address space
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct PointerSpec {
    /// The size of the bitwise representation of the pointer.
    pointer_size: Size,
    /// The alignment of pointers for this address space
    pointer_align: Align,
    /// The size of the value a pointer can be offset by in this address space.
    pointer_offset: Size,
    /// Pointers into this address space contain extra metadata
    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
    _is_fat: bool,
}

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    pub i128_align: Align,
    pub f16_align: Align,
    pub f32_align: Align,
    pub f64_align: Align,
    pub f128_align: Align,
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>,

    pub default_address_space: AddressSpace,
    pub default_address_space_pointer_spec: PointerSpec,

    /// Address space information of all known address spaces.
    ///
    /// # Note
    ///
    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
    /// which instead lives in [`Self::default_address_space_pointer_spec`].
    address_space_info: Vec<(AddressSpace, PointerSpec)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32).
    /// Note: This isn't in LLVM's data layout string; it is `short_enum`,
    /// so the only valid spec for LLVM is c_int::BITS or 8.
    pub c_enum_min_size: Integer,
}

294impl Default for TargetDataLayout {
295    /// Creates an instance of `TargetDataLayout`.
296    fn default() -> TargetDataLayout {
297        let align = |bits| Align::from_bits(bits).unwrap();
298        TargetDataLayout {
299            endian: Endian::Big,
300            i1_align: align(8),
301            i8_align: align(8),
302            i16_align: align(16),
303            i32_align: align(32),
304            i64_align: align(32),
305            i128_align: align(32),
306            f16_align: align(16),
307            f32_align: align(32),
308            f64_align: align(64),
309            f128_align: align(128),
310            aggregate_align: align(8),
311            vector_align: ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [(Size::from_bits(64), align(64)),
                (Size::from_bits(128), align(128))]))vec![
312                (Size::from_bits(64), align(64)),
313                (Size::from_bits(128), align(128)),
314            ],
315            default_address_space: AddressSpace::ZERO,
316            default_address_space_pointer_spec: PointerSpec {
317                pointer_size: Size::from_bits(64),
318                pointer_align: align(64),
319                pointer_offset: Size::from_bits(64),
320                _is_fat: false,
321            },
322            address_space_info: vec![],
323            instruction_address_space: AddressSpace::ZERO,
324            c_enum_min_size: Integer::I32,
325        }
326    }
327}
328
329pub enum TargetDataLayoutError<'a> {
330    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
331    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
332    MissingAlignment { cause: &'a str },
333    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
334    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
335    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
336    InvalidBitsSize { err: String },
337    UnknownPointerSpecification { err: String },
338}
339
340#[cfg(feature = "nightly")]
341impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetDataLayoutError<'_> {
342    fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
343        match self {
344            TargetDataLayoutError::InvalidAddressSpace { addr_space, err, cause } => {
345                Diag::new(dcx, level, msg!("invalid address space `{$addr_space}` for `{$cause}` in \"data-layout\": {$err}"))
346                    .with_arg("addr_space", addr_space)
347                    .with_arg("cause", cause)
348                    .with_arg("err", err)
349            }
350            TargetDataLayoutError::InvalidBits { kind, bit, cause, err } => {
351                Diag::new(dcx, level, msg!("invalid {$kind} `{$bit}` for `{$cause}` in \"data-layout\": {$err}"))
352                    .with_arg("kind", kind)
353                    .with_arg("bit", bit)
354                    .with_arg("cause", cause)
355                    .with_arg("err", err)
356            }
357            TargetDataLayoutError::MissingAlignment { cause } => {
358                Diag::new(dcx, level, msg!("missing alignment for `{$cause}` in \"data-layout\""))
359                    .with_arg("cause", cause)
360            }
361            TargetDataLayoutError::InvalidAlignment { cause, err } => {
362                Diag::new(dcx, level, msg!("invalid alignment for `{$cause}` in \"data-layout\": {$err}"))
363                    .with_arg("cause", cause)
364                    .with_arg("err", err.to_string())
365            }
366            TargetDataLayoutError::InconsistentTargetArchitecture { dl, target } => {
367                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims architecture is {$dl}-endian, while \"target-endian\" is `{$target}`"))
368                    .with_arg("dl", dl).with_arg("target", target)
369            }
370            TargetDataLayoutError::InconsistentTargetPointerWidth { pointer_size, target } => {
371                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims pointers are {$pointer_size}-bit, while \"target-pointer-width\" is `{$target}`"))
372                    .with_arg("pointer_size", pointer_size).with_arg("target", target)
373            }
374            TargetDataLayoutError::InvalidBitsSize { err } => {
375                Diag::new(dcx, level, msg!("{$err}")).with_arg("err", err)
376            }
377            TargetDataLayoutError::UnknownPointerSpecification { err } => {
378                Diag::new(dcx, level, msg!("unknown pointer specification `{$err}` in datalayout string"))
379                    .with_arg("err", err)
380            }
381        }
382    }
383}
384
385impl TargetDataLayout {
386    /// Parse data layout from an
387    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
388    ///
389    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since it cannot be
390    /// determined from the LLVM string.
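    ///
    /// # Example
    ///
    /// A minimal sketch with an illustrative layout string (real targets use longer,
    /// target-specific strings):
    ///
    /// ```ignore (illustrative only)
    /// if let Ok(dl) = TargetDataLayout::parse_from_llvm_datalayout_string(
    ///     "e-p:64:64-i64:64-n32:64-S128",
    ///     AddressSpace::ZERO,
    /// ) {
    ///     assert_eq!(dl.endian, Endian::Little);
    ///     assert_eq!(dl.pointer_size().bits(), 64);
    /// }
    /// ```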
391    pub fn parse_from_llvm_datalayout_string<'a>(
392        input: &'a str,
393        default_address_space: AddressSpace,
394    ) -> Result<TargetDataLayout, TargetDataLayoutError<'a>> {
395        // Parse an address space index from a string.
396        let parse_address_space = |s: &'a str, cause: &'a str| {
397            s.parse::<u32>().map(AddressSpace).map_err(|err| {
398                TargetDataLayoutError::InvalidAddressSpace { addr_space: s, cause, err }
399            })
400        };
401
402        // Parse a bit count from a string.
403        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
404            s.parse::<u64>().map_err(|err| TargetDataLayoutError::InvalidBits {
405                kind,
406                bit: s,
407                cause,
408                err,
409            })
410        };
411
412        // Parse a size string.
413        let parse_size =
414            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
415
416        // Parse an alignment string.
417        let parse_align_str = |s: &'a str, cause: &'a str| {
418            let align_from_bits = |bits| {
419                Align::from_bits(bits)
420                    .map_err(|err| TargetDataLayoutError::InvalidAlignment { cause, err })
421            };
422            let abi = parse_bits(s, "alignment", cause)?;
423            Ok(align_from_bits(abi)?)
424        };
425
426        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
427        // ignoring the secondary alignment specifications.
428        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
429            if s.is_empty() {
430                return Err(TargetDataLayoutError::MissingAlignment { cause });
431            }
432            parse_align_str(s[0], cause)
433        };
434
435        let mut dl = TargetDataLayout::default();
436        dl.default_address_space = default_address_space;
437
438        let mut i128_align_src = 64;
439        for spec in input.split('-') {
440            let spec_parts = spec.split(':').collect::<Vec<_>>();
441
442            match &*spec_parts {
443                ["e"] => dl.endian = Endian::Little,
444                ["E"] => dl.endian = Endian::Big,
445                [p] if p.starts_with('P') => {
446                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
447                }
448                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
449                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
450                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
451                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
452                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
453                [p, s, a @ ..] if p.starts_with("p") => {
454                    let mut p = p.strip_prefix('p').unwrap();
455                    let mut _is_fat = false;
456
457                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
458                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
459
460                    if p.starts_with('f') {
461                        p = p.strip_prefix('f').unwrap();
462                        _is_fat = true;
463                    }
464
465                    // However, we currently don't take into account further specifications:
466                    // an error is emitted instead.
467                    if p.starts_with(char::is_alphabetic) {
468                        return Err(TargetDataLayoutError::UnknownPointerSpecification {
469                            err: p.to_string(),
470                        });
471                    }
472
473                    let addr_space = if !p.is_empty() {
474                        parse_address_space(p, "p-")?
475                    } else {
476                        AddressSpace::ZERO
477                    };
478
479                    let pointer_size = parse_size(s, "p-")?;
480                    let pointer_align = parse_align_seq(a, "p-")?;
481                    let info = PointerSpec {
482                        pointer_offset: pointer_size,
483                        pointer_size,
484                        pointer_align,
485                        _is_fat,
486                    };
487                    if addr_space == default_address_space {
488                        dl.default_address_space_pointer_spec = info;
489                    } else {
490                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
491                            Some(e) => e.1 = info,
492                            None => {
493                                dl.address_space_info.push((addr_space, info));
494                            }
495                        }
496                    }
497                }
498                [p, s, a, _pr, i] if p.starts_with("p") => {
499                    let mut p = p.strip_prefix('p').unwrap();
500                    let mut _is_fat = false;
501
502                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
503                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
504
505                    if p.starts_with('f') {
506                        p = p.strip_prefix('f').unwrap();
507                        _is_fat = true;
508                    }
509
510                    // However, we currently don't take into account further specifications:
511                    // an error is emitted instead.
512                    if p.starts_with(char::is_alphabetic) {
513                        return Err(TargetDataLayoutError::UnknownPointerSpecification {
514                            err: p.to_string(),
515                        });
516                    }
517
518                    let addr_space = if !p.is_empty() {
519                        parse_address_space(p, "p")?
520                    } else {
521                        AddressSpace::ZERO
522                    };
523
524                    let info = PointerSpec {
525                        pointer_size: parse_size(s, "p-")?,
526                        pointer_align: parse_align_str(a, "p-")?,
527                        pointer_offset: parse_size(i, "p-")?,
528                        _is_fat,
529                    };
530
531                    if addr_space == default_address_space {
532                        dl.default_address_space_pointer_spec = info;
533                    } else {
534                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
535                            Some(e) => e.1 = info,
536                            None => {
537                                dl.address_space_info.push((addr_space, info));
538                            }
539                        }
540                    }
541                }
542
543                [s, a @ ..] if s.starts_with('i') => {
544                    let Ok(bits) = s[1..].parse::<u64>() else {
545                        parse_size(&s[1..], "i")?; // For the user error.
546                        continue;
547                    };
548                    let a = parse_align_seq(a, s)?;
549                    match bits {
550                        1 => dl.i1_align = a,
551                        8 => dl.i8_align = a,
552                        16 => dl.i16_align = a,
553                        32 => dl.i32_align = a,
554                        64 => dl.i64_align = a,
555                        _ => {}
556                    }
557                    if bits >= i128_align_src && bits <= 128 {
558                        // Default alignment for i128 is decided by taking the alignment of
559                        // largest-sized i{64..=128}.
560                        i128_align_src = bits;
561                        dl.i128_align = a;
562                    }
563                }
564                [s, a @ ..] if s.starts_with('v') => {
565                    let v_size = parse_size(&s[1..], "v")?;
566                    let a = parse_align_seq(a, s)?;
567                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
568                        v.1 = a;
569                        continue;
570                    }
571                    // No existing entry, add a new one.
572                    dl.vector_align.push((v_size, a));
573                }
574                _ => {} // Ignore everything else.
575            }
576        }
577
578        // Inherit, if not given, address space information for specific LLVM elements from the
579        // default data address space.
580        if (dl.instruction_address_space != dl.default_address_space)
581            && dl
582                .address_space_info
583                .iter()
584                .find(|(a, _)| *a == dl.instruction_address_space)
585                .is_none()
586        {
587            dl.address_space_info.push((
588                dl.instruction_address_space,
589                dl.default_address_space_pointer_spec.clone(),
590            ));
591        }
592
593        Ok(dl)
594    }
595
596    /// Returns the **exclusive** upper bound on object size in bytes, in the default data
597    /// address space.
598    ///
599    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
600    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
601    /// index every address within an object along with one byte past the end, along with allowing
602    /// `isize` to store the difference between any two pointers into an object.
603    ///
604    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
605    /// bytes, so we adopt the more constrained bound that this limitation implies.
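    ///
    /// For example, with 64-bit pointers the bound is `1 << 61` bytes (2^64 bits / 8),
    /// while with 16-bit or 32-bit pointers it is `isize::MAX + 1`, i.e. `1 << 15` or `1 << 31`.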
606    #[inline]
607    pub fn obj_size_bound(&self) -> u64 {
608        match self.pointer_size().bits() {
609            16 => 1 << 15,
610            32 => 1 << 31,
611            64 => 1 << 61,
612            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
613        }
614    }
615
616    /// Returns the **exclusive** upper bound on object size in bytes.
617    ///
618    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
619    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
620    /// index every address within an object along with one byte past the end, along with allowing
621    /// `isize` to store the difference between any two pointers into an object.
622    ///
623    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
624    /// bytes, so we adopt the more constrained bound that this limitation implies.
625    #[inline]
626    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
627        match self.pointer_size_in(address_space).bits() {
628            16 => 1 << 15,
629            32 => 1 << 31,
630            64 => 1 << 61,
631            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
632        }
633    }
634
635    #[inline]
636    pub fn ptr_sized_integer(&self) -> Integer {
637        use Integer::*;
638        match self.pointer_offset().bits() {
639            16 => I16,
640            32 => I32,
641            64 => I64,
642            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
643        }
644    }
645
646    #[inline]
647    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
648        use Integer::*;
649        match self.pointer_offset_in(address_space).bits() {
650            16 => I16,
651            32 => I32,
652            64 => I64,
653            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
654        }
655    }
656
657    /// psABI-mandated alignment for a vector type, if any
658    #[inline]
659    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
660        self.vector_align
661            .iter()
662            .find(|(size, _align)| *size == vec_size)
663            .map(|(_size, align)| *align)
664    }
665
666    /// an alignment resembling the one LLVM would pick for a vector
667    #[inline]
668    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
669        self.cabi_vector_align(vec_size)
670            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
671    }
672
673    /// Get the pointer size in the default data address space.
674    #[inline]
675    pub fn pointer_size(&self) -> Size {
676        self.default_address_space_pointer_spec.pointer_size
677    }
678
679    /// Get the pointer size in a specific address space.
680    #[inline]
681    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
682        if c == self.default_address_space {
683            return self.default_address_space_pointer_spec.pointer_size;
684        }
685
686        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
687            e.1.pointer_size
688        } else {
689            panic!("Use of unknown address space {c:?}");
690        }
691    }
692
693    /// Get the pointer index in the default data address space.
694    #[inline]
695    pub fn pointer_offset(&self) -> Size {
696        self.default_address_space_pointer_spec.pointer_offset
697    }
698
699    /// Get the pointer index in a specific address space.
700    #[inline]
701    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
702        if c == self.default_address_space {
703            return self.default_address_space_pointer_spec.pointer_offset;
704        }
705
706        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
707            e.1.pointer_offset
708        } else {
709            panic!("Use of unknown address space {c:?}");
710        }
711    }
712
713    /// Get the pointer alignment in the default data address space.
714    #[inline]
715    pub fn pointer_align(&self) -> AbiAlign {
716        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
717    }
718
719    /// Get the pointer alignment in a specific address space.
720    #[inline]
721    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
722        AbiAlign::new(if c == self.default_address_space {
723            self.default_address_space_pointer_spec.pointer_align
724        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
725            e.1.pointer_align
726        } else {
727            panic!("Use of unknown address space {c:?}");
728        })
729    }
730}
731
732pub trait HasDataLayout {
733    fn data_layout(&self) -> &TargetDataLayout;
734}
735
736impl HasDataLayout for TargetDataLayout {
737    #[inline]
738    fn data_layout(&self) -> &TargetDataLayout {
739        self
740    }
741}
742
743// used by rust-analyzer
744impl HasDataLayout for &TargetDataLayout {
745    #[inline]
746    fn data_layout(&self) -> &TargetDataLayout {
747        (**self).data_layout()
748    }
749}
750
751/// Endianness of the target, which must match cfg(target-endian).
752#[derive(Copy, Clone, PartialEq, Eq)]
753pub enum Endian {
754    Little,
755    Big,
756}
757
758impl Endian {
759    pub fn as_str(&self) -> &'static str {
760        match self {
761            Self::Little => "little",
762            Self::Big => "big",
763        }
764    }
765
766    #[cfg(feature = "nightly")]
767    pub fn desc_symbol(&self) -> Symbol {
768        match self {
769            Self::Little => sym::little,
770            Self::Big => sym::big,
771        }
772    }
773}
774
775impl fmt::Debug for Endian {
776    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
777        f.write_str(self.as_str())
778    }
779}
780
781impl FromStr for Endian {
782    type Err = String;
783
784    fn from_str(s: &str) -> Result<Self, Self::Err> {
785        match s {
786            "little" => Ok(Self::Little),
787            "big" => Ok(Self::Big),
788            _ => Err(format!(r#"unknown endian: "{s}""#)),
789        }
790    }
791}
792
793/// Size of a type in bytes.
794#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
795#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for Size {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    Size { raw: ref __binding_0 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for Size {
            fn decode(__decoder: &mut __D) -> Self {
                Size { raw: ::rustc_serialize::Decodable::decode(__decoder) }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl ::rustc_data_structures::stable_hasher::HashStable for Size {
            #[inline]
            fn hash_stable<__Hcx: ::rustc_data_structures::stable_hasher::HashStableContext>(&self,
                __hcx: &mut __Hcx,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Size { raw: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable))]
796pub struct Size {
797    raw: u64,
798}
799
800#[cfg(feature = "nightly")]
801impl StableOrd for Size {
802    const CAN_USE_UNSTABLE_SORT: bool = true;
803
804    // `Ord` is implemented as just comparing numerical values and numerical values
805    // are not changed by (de-)serialization.
806    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
807}
808
809// This is debug-printed a lot in larger structs, don't waste too much space there
810impl fmt::Debug for Size {
811    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
812        f.write_fmt(format_args!("Size({0} bytes)", self.bytes()))write!(f, "Size({} bytes)", self.bytes())
813    }
814}
815
816impl Size {
817    pub const ZERO: Size = Size { raw: 0 };
818
819    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
820    /// not a multiple of 8.
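    ///
    /// For example, `Size::from_bits(12)` is 2 bytes.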
821    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
822        let bits = bits.try_into().ok().unwrap();
823        Size { raw: bits.div_ceil(8) }
824    }
825
826    #[inline]
827    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
828        let bytes: u64 = bytes.try_into().ok().unwrap();
829        Size { raw: bytes }
830    }
831
832    #[inline]
833    pub fn bytes(self) -> u64 {
834        self.raw
835    }
836
837    #[inline]
838    pub fn bytes_usize(self) -> usize {
839        self.bytes().try_into().unwrap()
840    }
841
842    #[inline]
843    pub fn bits(self) -> u64 {
844        #[cold]
845        fn overflow(bytes: u64) -> ! {
846            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
847        }
848
849        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
850    }
851
852    #[inline]
853    pub fn bits_usize(self) -> usize {
854        self.bits().try_into().unwrap()
855    }
856
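    /// Rounds `self` up to the nearest multiple of `align`;
    /// e.g. 5 bytes aligned to a 4-byte alignment rounds up to 8 bytes.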
857    #[inline]
858    pub fn align_to(self, align: Align) -> Size {
859        let mask = align.bytes() - 1;
860        Size::from_bytes((self.bytes() + mask) & !mask)
861    }
862
863    #[inline]
864    pub fn is_aligned(self, align: Align) -> bool {
865        let mask = align.bytes() - 1;
866        self.bytes() & mask == 0
867    }
868
869    #[inline]
870    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
871        let dl = cx.data_layout();
872
873        let bytes = self.bytes().checked_add(offset.bytes())?;
874
875        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
876    }
877
878    #[inline]
879    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
880        let dl = cx.data_layout();
881
882        let bytes = self.bytes().checked_mul(count)?;
883        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
884    }
885
886    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
887    /// (i.e., if it is negative, fill with 1's on the left).
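    ///
    /// For example, `Size::from_bits(8).sign_extend(0xFF)` is `-1`,
    /// while `Size::from_bits(8).sign_extend(0x7F)` is `127`.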
888    #[inline]
889    pub fn sign_extend(self, value: u128) -> i128 {
890        let size = self.bits();
891        if size == 0 {
892            // Truncated until nothing is left.
893            return 0;
894        }
895        // Sign-extend it.
896        let shift = 128 - size;
897        // Shift the unsigned value to the left, then shift back to the right as signed
898        // (essentially fills with sign bit on the left).
899        ((value << shift) as i128) >> shift
900    }
901
902    /// Truncates `value` to `self` bits.
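    ///
    /// For example, `Size::from_bits(8).truncate(0x1FF)` is `0xFF`.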
903    #[inline]
904    pub fn truncate(self, value: u128) -> u128 {
905        let size = self.bits();
906        if size == 0 {
907            // Truncated until nothing is left.
908            return 0;
909        }
910        let shift = 128 - size;
911        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
912        (value << shift) >> shift
913    }
914
915    #[inline]
916    pub fn signed_int_min(&self) -> i128 {
917        self.sign_extend(1_u128 << (self.bits() - 1))
918    }
919
920    #[inline]
921    pub fn signed_int_max(&self) -> i128 {
922        i128::MAX >> (128 - self.bits())
923    }
924
925    #[inline]
926    pub fn unsigned_int_max(&self) -> u128 {
927        u128::MAX >> (128 - self.bits())
928    }
929}
930
931// Panicking addition, subtraction and multiplication for convenience.
932// Avoid during layout computation, return `LayoutError` instead.
933
934impl Add for Size {
935    type Output = Size;
936    #[inline]
937    fn add(self, other: Size) -> Size {
938        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
939            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
940        }))
941    }
942}
943
944impl Sub for Size {
945    type Output = Size;
946    #[inline]
947    fn sub(self, other: Size) -> Size {
948        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
949            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
950        }))
951    }
952}
953
954impl Mul<Size> for u64 {
955    type Output = Size;
956    #[inline]
957    fn mul(self, size: Size) -> Size {
958        size * self
959    }
960}
961
962impl Mul<u64> for Size {
963    type Output = Size;
964    #[inline]
965    fn mul(self, count: u64) -> Size {
966        match self.bytes().checked_mul(count) {
967            Some(bytes) => Size::from_bytes(bytes),
968            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
969        }
970    }
971}
972
973impl AddAssign for Size {
974    #[inline]
975    fn add_assign(&mut self, other: Size) {
976        *self = *self + other;
977    }
978}
979
980#[cfg(feature = "nightly")]
981impl Step for Size {
982    #[inline]
983    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
984        u64::steps_between(&start.bytes(), &end.bytes())
985    }
986
987    #[inline]
988    fn forward_checked(start: Self, count: usize) -> Option<Self> {
989        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
990    }
991
992    #[inline]
993    fn forward(start: Self, count: usize) -> Self {
994        Self::from_bytes(u64::forward(start.bytes(), count))
995    }
996
997    #[inline]
998    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
999        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
1000    }
1001
1002    #[inline]
1003    fn backward_checked(start: Self, count: usize) -> Option<Self> {
1004        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
1005    }
1006
1007    #[inline]
1008    fn backward(start: Self, count: usize) -> Self {
1009        Self::from_bytes(u64::backward(start.bytes(), count))
1010    }
1011
1012    #[inline]
1013    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
1014        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
1015    }
1016}
1017
1018/// Alignment of a type in bytes (always a power of two).
1019#[derive(#[automatically_derived]
impl ::core::marker::Copy for Align { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Align {
    #[inline]
    fn clone(&self) -> Align {
        let _: ::core::clone::AssertParamIsClone<u8>;
        *self
    }
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Align {
    #[inline]
    fn eq(&self, other: &Align) -> bool { self.pow2 == other.pow2 }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Align {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_fields_are_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u8>;
    }
}Eq, #[automatically_derived]
impl ::core::cmp::PartialOrd for Align {
    #[inline]
    fn partial_cmp(&self, other: &Align)
        -> ::core::option::Option<::core::cmp::Ordering> {
        ::core::cmp::PartialOrd::partial_cmp(&self.pow2, &other.pow2)
    }
}PartialOrd, #[automatically_derived]
impl ::core::cmp::Ord for Align {
    #[inline]
    fn cmp(&self, other: &Align) -> ::core::cmp::Ordering {
        ::core::cmp::Ord::cmp(&self.pow2, &other.pow2)
    }
}Ord, #[automatically_derived]
impl ::core::hash::Hash for Align {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
        ::core::hash::Hash::hash(&self.pow2, state)
    }
}Hash)]
1020#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for Align {
            fn encode(&self, __encoder: &mut __E) {
                match *self {
                    Align { pow2: ref __binding_0 } => {
                        ::rustc_serialize::Encodable::<__E>::encode(__binding_0,
                            __encoder);
                    }
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for Align {
            fn decode(__decoder: &mut __D) -> Self {
                Align {
                    pow2: ::rustc_serialize::Decodable::decode(__decoder),
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl ::rustc_data_structures::stable_hasher::HashStable for Align {
            #[inline]
            fn hash_stable<__Hcx: ::rustc_data_structures::stable_hasher::HashStableContext>(&self,
                __hcx: &mut __Hcx,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    Align { pow2: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable))]
1021pub struct Align {
1022    pow2: u8,
1023}
1024
1025// This is debug-printed a lot in larger structs, don't waste too much space there
1026impl fmt::Debug for Align {
1027    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1028        f.write_fmt(format_args!("Align({0} bytes)", self.bytes()))write!(f, "Align({} bytes)", self.bytes())
1029    }
1030}
1031
1032#[derive(Clone, Copy)]
1033pub enum AlignFromBytesError {
1034    NotPowerOfTwo(u64),
1035    TooLarge(u64),
1036}
1037
1038impl fmt::Debug for AlignFromBytesError {
1039    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1040        fmt::Display::fmt(self, f)
1041    }
1042}
1043
1044impl fmt::Display for AlignFromBytesError {
1045    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1046        match self {
1047            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "{align} is not a power of 2"),
1048            AlignFromBytesError::TooLarge(align) => write!(f, "{align} is too large"),
1049        }
1050    }
1051}
1052
1053impl Align {
1054    pub const ONE: Align = Align { pow2: 0 };
1055    pub const EIGHT: Align = Align { pow2: 3 };
1056    // LLVM has a maximal supported alignment of 2^29, we inherit that.
1057    pub const MAX: Align = Align { pow2: 29 };
1058
1059    /// Either `1 << (pointer_bits - 1)` or [`Align::MAX`], whichever is smaller.
1060    #[inline]
1061    pub fn max_for_target(tdl: &TargetDataLayout) -> Align {
1062        let pointer_bits = tdl.pointer_size().bits();
1063        if let Ok(pointer_bits) = u8::try_from(pointer_bits)
1064            && pointer_bits <= Align::MAX.pow2
1065        {
1066            Align { pow2: pointer_bits - 1 }
1067        } else {
1068            Align::MAX
1069        }
1070    }
1071
1072    #[inline]
1073    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
1074        Align::from_bytes(Size::from_bits(bits).bytes())
1075    }
1076
1077    #[inline]
1078    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
1079        // Treat an alignment of 0 bytes like 1-byte alignment.
1080        if align == 0 {
1081            return Ok(Align::ONE);
1082        }
1083
1084        #[cold]
1085        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
1086            AlignFromBytesError::NotPowerOfTwo(align)
1087        }
1088
1089        #[cold]
1090        const fn too_large(align: u64) -> AlignFromBytesError {
1091            AlignFromBytesError::TooLarge(align)
1092        }
1093
1094        let tz = align.trailing_zeros();
1095        if align != (1 << tz) {
1096            return Err(not_power_of_2(align));
1097        }
1098
1099        let pow2 = tz as u8;
1100        if pow2 > Self::MAX.pow2 {
1101            return Err(too_large(align));
1102        }
1103
1104        Ok(Align { pow2 })
1105    }
1106
1107    #[inline]
1108    pub const fn bytes(self) -> u64 {
1109        1 << self.pow2
1110    }
1111
1112    #[inline]
1113    pub fn bytes_usize(self) -> usize {
1114        self.bytes().try_into().unwrap()
1115    }
1116
1117    #[inline]
1118    pub const fn bits(self) -> u64 {
1119        self.bytes() * 8
1120    }
1121
1122    #[inline]
1123    pub fn bits_usize(self) -> usize {
1124        self.bits().try_into().unwrap()
1125    }
1126
1127    /// Obtain the greatest factor of `size` that is an alignment
1128    /// (the largest power of two the Size is a multiple of).
1129    ///
1130    /// Note that all numbers are factors of 0
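    ///
    /// For example, a `size` of 24 bytes yields `Align::EIGHT` (24 = 8 * 3).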
1131    #[inline]
1132    pub fn max_aligned_factor(size: Size) -> Align {
1133        Align { pow2: size.bytes().trailing_zeros() as u8 }
1134    }
1135
1136    /// Reduces Align to an aligned factor of `size`.
1137    #[inline]
1138    pub fn restrict_for_offset(self, size: Size) -> Align {
1139        self.min(Align::max_aligned_factor(size))
1140    }
1141}
1142
1143/// A pair of alignments, ABI-mandated and preferred.
1144///
1145/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
1146/// it is not exposed semantically to programmers nor can they meaningfully affect it.
1147/// The only concern for us is that preferred alignment must not be less than the mandated alignment
1148/// and thus in practice the two values are almost always identical.
1149///
1150/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
1151/// It is of effectively no consequence for layout in structs and on the stack.
1152#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
1153#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl ::rustc_data_structures::stable_hasher::HashStable for AbiAlign {
            #[inline]
            fn hash_stable<__Hcx: ::rustc_data_structures::stable_hasher::HashStableContext>(&self,
                __hcx: &mut __Hcx,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                match *self {
                    AbiAlign { abi: ref __binding_0 } => {
                        { __binding_0.hash_stable(__hcx, __hasher); }
                    }
                }
            }
        }
    };HashStable))]
1154pub struct AbiAlign {
1155    pub abi: Align,
1156}
1157
1158impl AbiAlign {
1159    #[inline]
1160    pub fn new(align: Align) -> AbiAlign {
1161        AbiAlign { abi: align }
1162    }
1163
1164    #[inline]
1165    pub fn min(self, other: AbiAlign) -> AbiAlign {
1166        AbiAlign { abi: self.abi.min(other.abi) }
1167    }
1168
1169    #[inline]
1170    pub fn max(self, other: AbiAlign) -> AbiAlign {
1171        AbiAlign { abi: self.abi.max(other.abi) }
1172    }
1173}
1174
1175impl Deref for AbiAlign {
1176    type Target = Align;
1177
1178    fn deref(&self) -> &Self::Target {
1179        &self.abi
1180    }
1181}
1182
1183/// Integers, also used for enum discriminants.
1184#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
1185#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl<__E: ::rustc_serialize::Encoder>
            ::rustc_serialize::Encodable<__E> for Integer {
            fn encode(&self, __encoder: &mut __E) {
                let disc =
                    match *self {
                        Integer::I8 => { 0usize }
                        Integer::I16 => { 1usize }
                        Integer::I32 => { 2usize }
                        Integer::I64 => { 3usize }
                        Integer::I128 => { 4usize }
                    };
                ::rustc_serialize::Encoder::emit_u8(__encoder, disc as u8);
                match *self {
                    Integer::I8 => {}
                    Integer::I16 => {}
                    Integer::I32 => {}
                    Integer::I64 => {}
                    Integer::I128 => {}
                }
            }
        }
    };Encodable_NoContext, const _: () =
    {
        impl<__D: ::rustc_serialize::Decoder>
            ::rustc_serialize::Decodable<__D> for Integer {
            fn decode(__decoder: &mut __D) -> Self {
                match ::rustc_serialize::Decoder::read_u8(__decoder) as usize
                    {
                    0usize => { Integer::I8 }
                    1usize => { Integer::I16 }
                    2usize => { Integer::I32 }
                    3usize => { Integer::I64 }
                    4usize => { Integer::I128 }
                    n => {
                        ::core::panicking::panic_fmt(format_args!("invalid enum variant tag while decoding `Integer`, expected 0..5, actual {0}",
                                n));
                    }
                }
            }
        }
    };Decodable_NoContext, const _: () =
    {
        impl ::rustc_data_structures::stable_hasher::HashStable for Integer {
            #[inline]
            fn hash_stable<__Hcx: ::rustc_data_structures::stable_hasher::HashStableContext>(&self,
                __hcx: &mut __Hcx,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Integer::I8 => {}
                    Integer::I16 => {}
                    Integer::I32 => {}
                    Integer::I64 => {}
                    Integer::I128 => {}
                }
            }
        }
    };HashStable))]
1186pub enum Integer {
1187    I8,
1188    I16,
1189    I32,
1190    I64,
1191    I128,
1192}
1193
1194impl Integer {
1195    pub fn int_ty_str(self) -> &'static str {
1196        use Integer::*;
1197        match self {
1198            I8 => "i8",
1199            I16 => "i16",
1200            I32 => "i32",
1201            I64 => "i64",
1202            I128 => "i128",
1203        }
1204    }
1205
1206    pub fn uint_ty_str(self) -> &'static str {
1207        use Integer::*;
1208        match self {
1209            I8 => "u8",
1210            I16 => "u16",
1211            I32 => "u32",
1212            I64 => "u64",
1213            I128 => "u128",
1214        }
1215    }
1216
1217    #[inline]
1218    pub fn size(self) -> Size {
1219        use Integer::*;
1220        match self {
1221            I8 => Size::from_bytes(1),
1222            I16 => Size::from_bytes(2),
1223            I32 => Size::from_bytes(4),
1224            I64 => Size::from_bytes(8),
1225            I128 => Size::from_bytes(16),
1226        }
1227    }
1228
1229    /// Gets the Integer type from an IntegerType.
1230    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
1231        let dl = cx.data_layout();
1232
1233        match ity {
1234            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
1235            IntegerType::Fixed(x, _) => x,
1236        }
1237    }
1238
1239    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1240        use Integer::*;
1241        let dl = cx.data_layout();
1242
1243        AbiAlign::new(match self {
1244            I8 => dl.i8_align,
1245            I16 => dl.i16_align,
1246            I32 => dl.i32_align,
1247            I64 => dl.i64_align,
1248            I128 => dl.i128_align,
1249        })
1250    }
1251
1252    /// Returns the largest signed value that can be represented by this Integer.
1253    #[inline]
1254    pub fn signed_max(self) -> i128 {
1255        use Integer::*;
1256        match self {
1257            I8 => i8::MAX as i128,
1258            I16 => i16::MAX as i128,
1259            I32 => i32::MAX as i128,
1260            I64 => i64::MAX as i128,
1261            I128 => i128::MAX,
1262        }
1263    }
1264
1265    /// Returns the smallest signed value that can be represented by this Integer.
1266    #[inline]
1267    pub fn signed_min(self) -> i128 {
1268        use Integer::*;
1269        match self {
1270            I8 => i8::MIN as i128,
1271            I16 => i16::MIN as i128,
1272            I32 => i32::MIN as i128,
1273            I64 => i64::MIN as i128,
1274            I128 => i128::MIN,
1275        }
1276    }
1277
1278    /// Finds the smallest Integer type which can represent the signed value.
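    ///
    /// For example, `Integer::fit_signed(127)` is `I8` and `Integer::fit_signed(128)` is `I16`.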
1279    #[inline]
1280    pub fn fit_signed(x: i128) -> Integer {
1281        use Integer::*;
1282        match x {
1283            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
1284            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
1285            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
1286            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
1287            _ => I128,
1288        }
1289    }
1290
1291    /// Finds the smallest Integer type which can represent the unsigned value.
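    ///
    /// For example, `Integer::fit_unsigned(255)` is `I8` and `Integer::fit_unsigned(256)` is `I16`.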
1292    #[inline]
1293    pub fn fit_unsigned(x: u128) -> Integer {
1294        use Integer::*;
1295        match x {
1296            0..=0x0000_0000_0000_00ff => I8,
1297            0..=0x0000_0000_0000_ffff => I16,
1298            0..=0x0000_0000_ffff_ffff => I32,
1299            0..=0xffff_ffff_ffff_ffff => I64,
1300            _ => I128,
1301        }
1302    }
1303
1304    /// Finds the smallest integer with the given alignment.
1305    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
1306        use Integer::*;
1307        let dl = cx.data_layout();
1308
1309        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
1310            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
1311        })
1312    }
1313
1314    /// Find the largest integer with the given alignment or less.
1315    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
1316        use Integer::*;
1317        let dl = cx.data_layout();
1318
1319        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
1320        for candidate in [I64, I32, I16] {
1321            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
1322                return candidate;
1323            }
1324        }
1325        I8
1326    }
1327
1328    // FIXME(eddyb) consolidate this and other methods that find the appropriate
1329    // `Integer` given some requirements.
1330    #[inline]
1331    pub fn from_size(size: Size) -> Result<Self, String> {
1332        match size.bits() {
1333            8 => Ok(Integer::I8),
1334            16 => Ok(Integer::I16),
1335            32 => Ok(Integer::I32),
1336            64 => Ok(Integer::I64),
1337            128 => Ok(Integer::I128),
1338            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
1339        }
1340    }
1341}
1342
1343/// Floating-point types.
1344#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
1345#[cfg_attr(feature = "nightly", derive(const _: () =
    {
        impl ::rustc_data_structures::stable_hasher::HashStable for Float {
            #[inline]
            fn hash_stable<__Hcx: ::rustc_data_structures::stable_hasher::HashStableContext>(&self,
                __hcx: &mut __Hcx,
                __hasher:
                    &mut ::rustc_data_structures::stable_hasher::StableHasher) {
                ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
                match *self {
                    Float::F16 => {}
                    Float::F32 => {}
                    Float::F64 => {}
                    Float::F128 => {}
                }
            }
        }
    };HashStable))]
1346pub enum Float {
1347    F16,
1348    F32,
1349    F64,
1350    F128,
1351}
1352
1353impl Float {
1354    pub fn size(self) -> Size {
1355        use Float::*;
1356
1357        match self {
1358            F16 => Size::from_bits(16),
1359            F32 => Size::from_bits(32),
1360            F64 => Size::from_bits(64),
1361            F128 => Size::from_bits(128),
1362        }
1363    }
1364
1365    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1366        use Float::*;
1367        let dl = cx.data_layout();
1368
1369        AbiAlign::new(match self {
1370            F16 => dl.f16_align,
1371            F32 => dl.f32_align,
1372            F64 => dl.f64_align,
1373            F128 => dl.f128_align,
1374        })
1375    }
1376}
1377
1378/// Fundamental unit of memory access and layout.
1379#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
1380#[cfg_attr(feature = "nightly", derive(HashStable))]
1381pub enum Primitive {
1382    /// The `bool` is the signedness of the `Integer` type.
1383    ///
1384    /// One would think we would not care about such details this low down,
1385    /// but some ABIs are described in terms of C types and ISAs where the
1386    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
1387    /// a negative integer passed by zero-extension will appear positive in
1388    /// the callee, and most operations on it will produce the wrong values.
1389    Int(Integer, bool),
1390    Float(Float),
1391    Pointer(AddressSpace),
1392}
1393
1394impl Primitive {
1395    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
1396        use Primitive::*;
1397        let dl = cx.data_layout();
1398
1399        match self {
1400            Int(i, _) => i.size(),
1401            Float(f) => f.size(),
1402            Pointer(a) => dl.pointer_size_in(a),
1403        }
1404    }
1405
1406    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1407        use Primitive::*;
1408        let dl = cx.data_layout();
1409
1410        match self {
1411            Int(i, _) => i.align(dl),
1412            Float(f) => f.align(dl),
1413            Pointer(a) => dl.pointer_align_in(a),
1414        }
1415    }
1416}
1417
1418/// Inclusive wrap-around range of valid values, that is, if
1419/// start > end, it represents `start..=MAX`, followed by `0..=end`.
1420///
1421/// That is, for an i8 primitive, a range of `254..=2` means the following
1422/// sequence:
1423///
1424///    254 (-2), 255 (-1), 0, 1, 2
1425///
1426/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
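///
/// As a minimal illustrative sketch (assuming the public `start`/`end` fields
/// and the `contains` helper defined below), the `254..=2` example reads:
///
/// ```
/// # use rustc_abi::WrappingRange;
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255));  // in the `start..=MAX` part
/// assert!(r.contains(1));    // in the `0..=end` part
/// assert!(!r.contains(100)); // in neither part
/// ```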
1427#[derive(Clone, Copy, PartialEq, Eq, Hash)]
1428#[cfg_attr(feature = "nightly", derive(HashStable))]
1429pub struct WrappingRange {
1430    pub start: u128,
1431    pub end: u128,
1432}
1433
1434impl WrappingRange {
1435    pub fn full(size: Size) -> Self {
1436        Self { start: 0, end: size.unsigned_int_max() }
1437    }
1438
1439    /// Returns `true` if `v` is contained in the range.
1440    #[inline(always)]
1441    pub fn contains(&self, v: u128) -> bool {
1442        if self.start <= self.end {
1443            self.start <= v && v <= self.end
1444        } else {
1445            self.start <= v || v <= self.end
1446        }
1447    }
1448
1449    /// Returns `true` if all the values in `other` are contained in this range,
1450    /// when the values are considered as having width `size`.
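    ///
    /// A worked sketch (assuming an 8-bit `size`): `254..=10` contains `0..=5`,
    /// because after shifting both ranges by `delta = 254` the check becomes
    /// whether `0..=12` contains `2..=7`, which it does.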
1451    #[inline(always)]
1452    pub fn contains_range(&self, other: Self, size: Size) -> bool {
1453        if self.is_full_for(size) {
1454            true
1455        } else {
1456            let trunc = |x| size.truncate(x);
1457
1458            let delta = self.start;
1459            let max = trunc(self.end.wrapping_sub(delta));
1460
1461            let other_start = trunc(other.start.wrapping_sub(delta));
1462            let other_end = trunc(other.end.wrapping_sub(delta));
1463
1464            // Having shifted both input ranges by `delta`, now we only need to check
1465            // whether `0..=max` contains `other_start..=other_end`, which can only
1466            // happen if the other doesn't wrap since `self` isn't everything.
1467            (other_start <= other_end) && (other_end <= max)
1468        }
1469    }
1470
1471    /// Returns `self` with replaced `start`
1472    #[inline(always)]
1473    fn with_start(mut self, start: u128) -> Self {
1474        self.start = start;
1475        self
1476    }
1477
1478    /// Returns `self` with replaced `end`
1479    #[inline(always)]
1480    fn with_end(mut self, end: u128) -> Self {
1481        self.end = end;
1482        self
1483    }
1484
1485    /// Returns `true` if `size` completely fills the range.
1486    ///
1487    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
1488    /// Niche calculations can produce full ranges which are not the canonical one;
1489    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
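    /// As a sketch of the check below for a 16-bit size: `1..=0` is full because
    /// `start == end.wrapping_add(1) & 0xFFFF`, even though it is written
    /// differently from the canonical `0..=0xFFFF`.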
1490    #[inline]
1491    fn is_full_for(&self, size: Size) -> bool {
1492        let max_value = size.unsigned_int_max();
1493        debug_assert!(self.start <= max_value && self.end <= max_value);
1494        self.start == (self.end.wrapping_add(1) & max_value)
1495    }
1496
1497    /// Checks whether this range is considered non-wrapping when the values are
1498    /// interpreted as *unsigned* numbers of width `size`.
1499    ///
1500    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1501    /// and `Err(..)` if the range is full, so it depends on how you think about it.
1502    #[inline]
1503    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1504        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
1505    }
1506
1507    /// Checks whether this range is considered non-wrapping when the values are
1508    /// interpreted as *signed* numbers of width `size`.
1509    ///
1510    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
1511    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
1512    ///
1513    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
1514    /// and `Err(..)` if the range is full, so it depends on how you think about it.
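    ///
    /// For instance (an illustrative sketch): `100..=200` yields `Ok(false)` at
    /// a 1-byte `size` (200 sign-extends to -56, which is below 100) but
    /// `Ok(true)` at a 2-byte `size`, where both bounds stay positive.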
1515    #[inline]
1516    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1517        if self.is_full_for(size) {
1518            Err(..)
1519        } else {
1520            let start: i128 = size.sign_extend(self.start);
1521            let end: i128 = size.sign_extend(self.end);
1522            Ok(start <= end)
1523        }
1524    }
1525}
1526
1527impl fmt::Debug for WrappingRange {
1528    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1529        if self.start > self.end {
1530            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1531        } else {
1532            write!(fmt, "{}..={}", self.start, self.end)?;
1533        }
1534        Ok(())
1535    }
1536}
1537
1538/// Information about one scalar component of a Rust type.
1539#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1540#[cfg_attr(feature = "nightly", derive(HashStable))]
1541pub enum Scalar {
1542    Initialized {
1543        value: Primitive,
1544
1545        // FIXME(eddyb) always use the shortest range, e.g., by finding
1546        // the largest space between two consecutive valid values and
1547        // taking everything else as the (shortest) valid range.
1548        valid_range: WrappingRange,
1549    },
1550    Union {
1551        /// Even for unions, we need to use the correct registers for the kind of
1552        /// values inside the union, so we keep the `Primitive` type around. We
1553        /// also use it to compute the size of the scalar.
1554        /// However, unions never have niches and even allow undef,
1555        /// so there is no `valid_range`.
1556        value: Primitive,
1557    },
1558}
1559
1560impl Scalar {
1561    #[inline]
1562    pub fn is_bool(&self) -> bool {
1563        use Integer::*;
1564        matches!(
1565            self,
1566            Scalar::Initialized {
1567                value: Primitive::Int(I8, false),
1568                valid_range: WrappingRange { start: 0, end: 1 }
1569            }
1570        )
1571    }
1572
1573    /// Get the primitive representation of this type, ignoring the valid range and whether the
1574    /// value is allowed to be undefined (due to being a union).
1575    pub fn primitive(&self) -> Primitive {
1576        match *self {
1577            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1578        }
1579    }
1580
1581    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
1582        self.primitive().align(cx)
1583    }
1584
1585    pub fn size(self, cx: &impl HasDataLayout) -> Size {
1586        self.primitive().size(cx)
1587    }
1588
1589    #[inline]
1590    pub fn to_union(&self) -> Self {
1591        Self::Union { value: self.primitive() }
1592    }
1593
1594    #[inline]
1595    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1596        match *self {
1597            Scalar::Initialized { valid_range, .. } => valid_range,
1598            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1599        }
1600    }
1601
1602    #[inline]
1603    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1604    /// union.
1605    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1606        match self {
1607            Scalar::Initialized { valid_range, .. } => valid_range,
1608            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
1609        }
1610    }
1611
1612    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
1613    /// layout.
1614    #[inline]
1615    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1616        match *self {
1617            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1618            Scalar::Union { .. } => true,
1619        }
1620    }
1621
1622    /// Returns `true` if this type can be left uninit.
1623    #[inline]
1624    pub fn is_uninit_valid(&self) -> bool {
1625        match *self {
1626            Scalar::Initialized { .. } => false,
1627            Scalar::Union { .. } => true,
1628        }
1629    }
1630
1631    /// Returns `true` if this is a signed integer scalar
1632    #[inline]
1633    pub fn is_signed(&self) -> bool {
1634        match self.primitive() {
1635            Primitive::Int(_, signed) => signed,
1636            _ => false,
1637        }
1638    }
1639}
1640
1641// NOTE: This enum is generic over the FieldIdx for rust-analyzer usage.
1642/// Describes how the fields of a type are located in memory.
1643#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1644#[cfg_attr(feature = "nightly", derive(HashStable))]
1645pub enum FieldsShape<FieldIdx: Idx> {
1646    /// Scalar primitives and `!`, which never have fields.
1647    Primitive,
1648
1649    /// All fields start at no offset. The `usize` is the field count.
1650    Union(NonZeroUsize),
1651
1652    /// Array/vector-like placement, with all fields of identical types.
1653    Array { stride: Size, count: u64 },
1654
1655    /// Struct-like placement, with precomputed offsets.
1656    ///
1657    /// Fields are guaranteed to not overlap, but note that gaps
1658    /// before, between and after all the fields are NOT always
1659    /// padding, and as such their contents may not be discarded.
1660    /// For example, enum variants leave a gap at the start,
1661    /// where the discriminant field in the enum layout goes.
1662    Arbitrary {
1663        /// Offsets for the first byte of each field,
1664        /// ordered to match the source definition order.
1665        /// This vector is therefore not necessarily in increasing order.
1666        // FIXME(eddyb) use small vector optimization for the common case.
1667        offsets: IndexVec<FieldIdx, Size>,
1668
1669        /// Maps memory order field indices to source order indices,
1670        /// depending on how the fields were reordered (if at all).
1671        /// This is a permutation, with both the source order and the
1672        /// memory order using the same (0..n) index ranges.
1673        ///
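        /// For example (an illustrative sketch): if the source order of the
        /// fields is `(a, b, c)` but the memory order is `(c, a, b)`, then
        /// `in_memory_order == [2, 0, 1]`, mapping memory index to source index.
        ///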
1674        // FIXME(eddyb) build a better abstraction for permutations, if possible.
1675        // FIXME(camlorn) also consider small vector optimization here.
1676        in_memory_order: IndexVec<u32, FieldIdx>,
1677    },
1678}
1679
1680impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1681    #[inline]
1682    pub fn count(&self) -> usize {
1683        match *self {
1684            FieldsShape::Primitive => 0,
1685            FieldsShape::Union(count) => count.get(),
1686            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1687            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1688        }
1689    }
1690
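    // A brief illustrative sketch of `offset` below: with
    // `FieldsShape::Array { stride: Size::from_bytes(4), count: 10 }`,
    // `offset(3)` is `Size::from_bytes(12)`, while every field of a `Union` sits at offset 0.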
1691    #[inline]
1692    pub fn offset(&self, i: usize) -> Size {
1693        match *self {
1694            FieldsShape::Primitive => {
1695                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1696            }
1697            FieldsShape::Union(count) => {
1698                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1699                Size::ZERO
1700            }
1701            FieldsShape::Array { stride, count } => {
1702                let i = u64::try_from(i).unwrap();
1703                assert!(i < count, "tried to access field {i} of array with {count} fields");
1704                stride * i
1705            }
1706            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1707        }
1708    }
1709
1710    /// Gets source indices of the fields by increasing offsets.
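    ///
    /// For an `Arbitrary` shape whose `in_memory_order` is `[2, 0, 1]` (an
    /// illustrative sketch), this yields `2, 0, 1`, i.e. the source field
    /// indices visited in increasing offset (memory) order.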
1711    #[inline]
1712    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1713        // Primitives don't really have fields in the way that structs do,
1714        // but having this return an empty iterator for them is unhelpful
1715        // since that makes them look kinda like ZSTs, which they're not.
1716        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1717
1718        (0..pseudofield_count).map(move |i| match self {
1719            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1720            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
1721        })
1722    }
1723}
1724
1725/// An identifier that specifies the address space that some operation
1726/// should operate on. Special address spaces have an effect on code generation,
1727/// depending on the target and the address spaces it implements.
1728#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
1729#[cfg_attr(feature = "nightly", derive(HashStable))]
1730pub struct AddressSpace(pub u32);
1731
1732impl AddressSpace {
1733    /// LLVM's `0` address space.
1734    pub const ZERO: Self = AddressSpace(0);
1735    /// The address space for workgroup memory on nvptx and amdgpu.
1736    /// See e.g. the `gpu_launch_sized_workgroup_mem` intrinsic for details.
1737    pub const GPU_WORKGROUP: Self = AddressSpace(3);
1738}
1739
1740/// How many scalable vectors are in a `BackendRepr::ScalableVector`?
1741#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1742#[cfg_attr(feature = "nightly", derive(HashStable))]
1743pub struct NumScalableVectors(pub u8);
1744
1745impl NumScalableVectors {
1746    /// Returns a `NumScalableVectors` for a non-tuple scalable vector (e.g. a single vector).
1747    pub fn for_non_tuple() -> Self {
1748        NumScalableVectors(1)
1749    }
1750
1751    // Returns `NumScalableVectors` for values of two through eight, which are the valid numbers of
1752    // fields for a tuple of scalable vectors to have. `1` is a valid value of `NumScalableVectors`,
1753    // but not a valid field count for such a tuple.
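    //
    // For example (an illustrative sketch): `from_field_count(4)` is
    // `Some(NumScalableVectors(4))`, while `from_field_count(1)` and
    // `from_field_count(9)` are `None`.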
1754    pub fn from_field_count(count: usize) -> Option<Self> {
1755        match count {
1756            2..=8 => Some(NumScalableVectors(count as u8)),
1757            _ => None,
1758        }
1759    }
1760}
1761
1762#[cfg(feature = "nightly")]
1763impl IntoDiagArg for NumScalableVectors {
1764    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
1765        DiagArgValue::Str(std::borrow::Cow::Borrowed(match self.0 {
1766            0 => panic!("`NumScalableVectors(0)` is ill-formed"),
1767            1 => "one",
1768            2 => "two",
1769            3 => "three",
1770            4 => "four",
1771            5 => "five",
1772            6 => "six",
1773            7 => "seven",
1774            8 => "eight",
1775            _ => panic!("`NumScalableVectors(N)` for N > 8 is ill-formed"),
1776        }))
1777    }
1778}
1779
1780/// The way we represent values to the backend
1781///
1782/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
1783/// In reality, this implies little about that, but is mostly used to describe the syntactic form
1784/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
1785/// The psABI may still need consideration when doing so, but this enum does not, by itself,
1786/// constitute a promise about how the value will be lowered for the calling convention.
1787///
1788/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
1789/// and larger values will usually prefer to be represented as memory.
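///
/// As a rough, illustrative sketch (not a layout guarantee): `i32` is typically
/// `Scalar`, a fat pointer such as `&[u8]` is `ScalarPair` (pointer plus length),
/// a `#[repr(simd)]` vector is `SimdVector`, and a large struct or one with an
/// unsized tail usually ends up as `Memory { sized: true }` or
/// `Memory { sized: false }` respectively.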
1790#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1791#[cfg_attr(feature = "nightly", derive(HashStable))]
1792pub enum BackendRepr {
1793    Scalar(Scalar),
1794    ScalarPair(Scalar, Scalar),
1795    SimdScalableVector {
1796        element: Scalar,
1797        count: u64,
1798        number_of_vectors: NumScalableVectors,
1799    },
1800    SimdVector {
1801        element: Scalar,
1802        count: u64,
1803    },
1804    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
1805    Memory {
1806        /// If true, the size is exact, otherwise it's only a lower bound.
1807        sized: bool,
1808    },
1809}
1810
1811impl BackendRepr {
1812    /// Returns `true` if the layout corresponds to an unsized type.
1813    #[inline]
1814    pub fn is_unsized(&self) -> bool {
1815        match *self {
1816            BackendRepr::Scalar(_)
1817            | BackendRepr::ScalarPair(..)
1818            // FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
1819            // `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
1820            // fully implemented, scalable vectors will remain `Sized`, they just won't be
1821            // `const Sized` - whether `is_unsized` continues to return `false` at that point will
1822            // need to be revisited and will depend on what `is_unsized` is used for.
1823            | BackendRepr::SimdScalableVector { .. }
1824            | BackendRepr::SimdVector { .. } => false,
1825            BackendRepr::Memory { sized } => !sized,
1826        }
1827    }
1828
1829    #[inline]
1830    pub fn is_sized(&self) -> bool {
1831        !self.is_unsized()
1832    }
1833
1834    /// Returns `true` if this is a single signed integer scalar.
1835    /// Sanity check: panics if this is not a scalar type (see PR #70189).
1836    #[inline]
1837    pub fn is_signed(&self) -> bool {
1838        match self {
1839            BackendRepr::Scalar(scal) => scal.is_signed(),
1840            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
1841        }
1842    }
1843
1844    /// Returns `true` if this is a scalar type
1845    #[inline]
1846    pub fn is_scalar(&self) -> bool {
1847        matches!(*self, BackendRepr::Scalar(_))
1848    }
1849
1850    /// Returns `true` if this is a bool
1851    #[inline]
1852    pub fn is_bool(&self) -> bool {
1853        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1854    }
1855
1856    /// The psABI alignment for a `Scalar` or `ScalarPair`
1857    ///
1858    /// `None` for other variants.
1859    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1860        match *self {
1861            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1862            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1863            // The align of a Vector can vary in surprising ways
1864            BackendRepr::SimdVector { .. }
1865            | BackendRepr::Memory { .. }
1866            | BackendRepr::SimdScalableVector { .. } => None,
1867        }
1868    }
1869
1870    /// The psABI size for a `Scalar` or `ScalarPair`
1871    ///
1872    /// `None` for other variants
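    ///
    /// A worked sketch for the `ScalarPair` arm below: for a pair of an `i8` and
    /// an `i32` on a target where `i32` is 4-byte aligned, the second field is
    /// placed at offset 4 and the total is rounded up to `Size::from_bytes(8)`.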
1873    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1874        match *self {
1875            // No padding in scalars.
1876            BackendRepr::Scalar(s) => Some(s.size(cx)),
1877            // May have some padding between the pair.
1878            BackendRepr::ScalarPair(s1, s2) => {
1879                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1880                let size = (field2_offset + s2.size(cx)).align_to(
1881                    self.scalar_align(cx)
1882                        // We absolutely must have an answer here or everything is FUBAR.
1883                        .unwrap(),
1884                );
1885                Some(size)
1886            }
1887            // The size of a Vector can vary in surprising ways
1888            BackendRepr::SimdVector { .. }
1889            | BackendRepr::Memory { .. }
1890            | BackendRepr::SimdScalableVector { .. } => None,
1891        }
1892    }
1893
1894    /// Discard validity range information and allow undef.
1895    pub fn to_union(&self) -> Self {
1896        match *self {
1897            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
1898            BackendRepr::ScalarPair(s1, s2) => {
1899                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
1900            }
1901            BackendRepr::SimdVector { element, count } => {
1902                BackendRepr::SimdVector { element: element.to_union(), count }
1903            }
1904            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
1905            BackendRepr::SimdScalableVector { element, count, number_of_vectors } => {
1906                BackendRepr::SimdScalableVector {
1907                    element: element.to_union(),
1908                    count,
1909                    number_of_vectors,
1910                }
1911            }
1912        }
1913    }
1914
1915    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
1916        match (self, other) {
1917            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
1918            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
1919            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
1920            (
1921                BackendRepr::SimdVector { element: element_l, count: count_l },
1922                BackendRepr::SimdVector { element: element_r, count: count_r },
1923            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
1924            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
1925                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
1926            }
1927            // Everything else must be strictly identical.
1928            _ => self == other,
1929        }
1930    }
1931}
1932
1933// NOTE: This enum is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
1934#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1935#[cfg_attr(feature = "nightly", derive(HashStable))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single-variant enums, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: FieldIdx,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}

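// A minimal sketch (not part of the compiler's API surface) of the shapes described above:
// single-variant types such as structs, tuples, unions, and single-variant enums are
// `Variants::Single` with index `FIRST_VARIANT`, while multi-variant enums carry a tag plus
// one `LayoutData` per variant in `Variants::Multiple`. The test below only exercises the
// `Single` shape; it does not run any real layout computation.
#[cfg(test)]
mod variants_docs_example {
    use crate::{FIRST_VARIANT, FieldIdx, VariantIdx, Variants};

    #[test]
    fn single_variant_uses_first_index() {
        // Structs and other single-variant types always report variant index 0.
        let v: Variants<FieldIdx, VariantIdx> = Variants::Single { index: FIRST_VARIANT };
        assert!(matches!(v, Variants::Single { index } if index == FIRST_VARIANT));
    }
}
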
// NOTE: This enum is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant.
    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
    ///
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
    /// For a variant with variant index `i`, such that `i != untagged_variant`,
    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field, cf. the
    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
    /// query implementation).
    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that the tag for
    /// `None` is the null pointer in the second tuple field, and
    /// `Some` is the identity function (with a non-null reference)
    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant` or uninhabited variants;
        /// these are then just "dead values" and not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is inbounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}

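// A minimal arithmetic sketch of the niche-tag formula documented above, using plain `u8`
// math instead of real layouts. The concrete numbers (a niche starting at 2 with
// `niche_variants = 1..=2`) are assumptions chosen purely for illustration.
#[cfg(test)]
mod tag_encoding_docs_example {
    #[test]
    fn niche_tag_round_trip() {
        let niche_start: u8 = 2;
        let niche_variants = 1u8..=2u8;
        // Encoding: tag = (i - niche_variants.start).wrapping_add(niche_start).
        let tag_for = |i: u8| i.wrapping_sub(*niche_variants.start()).wrapping_add(niche_start);
        assert_eq!(tag_for(1), 2);
        assert_eq!(tag_for(2), 3);
        // Decoding: i = tag.wrapping_sub(niche_start) + niche_variants.start; a result outside
        // `niche_variants` would instead denote the untagged variant.
        let variant_for =
            |tag: u8| tag.wrapping_sub(niche_start).wrapping_add(*niche_variants.start());
        assert_eq!(variant_for(tag_for(1)), 1);
        assert_eq!(variant_for(tag_for(2)), 2);
    }
}
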
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let available = v.start.wrapping_sub(v.end).wrapping_sub(1) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for `None`
        // to occupy the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select the bound closest to zero, given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        // FIXME: this ought to work for `bool` too, but that seems to be hitting a miscompilation
        // <https://github.com/rust-lang/rust/pull/155473#issuecomment-4302036343>
        let is_bool = size.bytes() == 1 && v == WrappingRange { start: 0, end: 1 };
        if count == 1 && !is_bool {
            // We only need one, so just pick the one closest to zero.
            // Not only does that obviously use zero if it's possible, but it also
            // simplifies testing things like `Option<char>`, since looking for `-1`
            // is easier than looking for `1114112` (and matches clang's `WEOF`).
            let next_up = size.sign_extend(v.end.wrapping_add(1)).unsigned_abs();
            let next_down = size.sign_extend(v.start.wrapping_sub(1)).unsigned_abs();
            if next_down <= next_up { move_start(v) } else { move_end(v) }
        } else if v.start > v.end {
            // Zero is unavailable because wrapping occurs.
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // Moved past zero, use the other bound.
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // Moved past zero, use the other bound.
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

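// A minimal sketch around `Niche`: the first test builds a `bool`-shaped niche by hand (an
// 8-bit field whose only valid values are 0 and 1); the second mirrors the arithmetic of
// `available` with plain `u128` math for a `char`-like valid range `0..=0x10FFFF` in a 4-byte
// field. The concrete values are assumptions for illustration, not the output of any real
// layout query.
#[cfg(test)]
mod niche_docs_example {
    use crate::{Integer, Niche, Primitive, Size, WrappingRange};

    #[test]
    fn bool_like_niche() {
        let niche = Niche {
            offset: Size::ZERO,
            value: Primitive::Int(Integer::I8, false),
            valid_range: WrappingRange { start: 0, end: 1 },
        };
        // Values 2..=255 are invalid for the field, so they are usable for tag encoding.
        assert_eq!(niche.valid_range, WrappingRange { start: 0, end: 1 });
    }

    #[test]
    fn char_like_available() {
        // Same formula as `available`: count how many values fall outside the valid range.
        let max_value: u128 = u32::MAX as u128;
        let (start, end): (u128, u128) = (0, 0x10FFFF);
        let outside = start.wrapping_sub(end).wrapping_sub(1) & max_value;
        assert_eq!(outside, (1u128 << 32) - 0x11_0000);
    }
}
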
// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `BackendRepr::ScalarPair`! So, even with a non-`Memory` `backend_repr`, `fields` and
    /// `variants` have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,
    /// Is this type known to be uninhabited?
    ///
    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
    pub uninhabited: bool,

    pub align: AbiAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed so that downstream crates can use it
    /// to distinguish different types.
    ///
    /// For every T and U for which we do not guarantee that a repr(Rust) `Foo<T>` can be coerced
    /// or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so that Foo
    /// can choose to reorder its fields based on that information. The current implementation is
    /// a conservative approximation of this goal.
    pub randomization_seed: Hash64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a ScalarPair!).
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::SimdVector { .. }
            | BackendRepr::SimdScalableVector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutData>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            uninhabited,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("backend_repr", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("uninhabited", uninhabited)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}

/// Encodes extra information we have about a pointer.
///
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer.
    pub safe: Option<PointerKind>,
    /// If `size` is not zero, then the pointer is either null or dereferenceable for this many
    /// bytes (independent of `safe`).
    ///
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire
    /// duration of this function call", i.e. it is UB to free the memory that this pointer
    /// points to while this function is still running.
    pub size: Size,
    /// The pointer is guaranteed to be aligned this much (independent of `safe`).
    pub align: Align,
}

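// A minimal sketch of the advisory information a backend might be handed for a `&u8`-style
// argument: a frozen shared reference, dereferenceable for 1 byte and aligned to 1 byte. The
// concrete values are assumptions for illustration and are not produced by any layout query
// here.
#[cfg(test)]
mod pointee_info_docs_example {
    use crate::{Align, PointeeInfo, PointerKind, Size};

    #[test]
    fn shared_ref_to_byte() {
        let info = PointeeInfo {
            safe: Some(PointerKind::SharedRef { frozen: true }),
            size: Size::from_bytes(1),
            align: Align::ONE,
        };
        // A raw pointer would instead have `safe: None`.
        assert!(matches!(info.safe, Some(PointerKind::SharedRef { frozen: true })));
    }
}
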
impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
    }

    /// Returns `true` if the size of the type is only known at runtime.
    pub fn is_scalable_vector(&self) -> bool {
        matches!(self.backend_repr, BackendRepr::SimdScalableVector { .. })
    }

    /// Returns the element count of a scalable vector.
    pub fn scalable_vector_element_count(&self) -> Option<u64> {
        match self.backend_repr {
            BackendRepr::SimdScalableVector { count, .. } => Some(count),
            _ => None,
        }
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdScalableVector { .. }
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in
    /// the `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
    /// checks would otherwise be required.
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata
        // must also have the same ABI, and moreover that the same metadata leads to the same
        // size. The second point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

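// A minimal sketch of how these layout requests differ: a plain struct asks for `AlwaysSized`,
// a struct whose last field may be a DST asks for `MaybeUnsized`, and an enum variant is laid
// out behind a prefix reserving room for the tag. The 4-byte, 4-aligned prefix below is an
// assumption chosen only for illustration.
#[cfg(test)]
mod struct_kind_docs_example {
    use crate::{Align, Size, StructKind};

    #[test]
    fn prefixed_variant_request() {
        let kind = StructKind::Prefixed(Size::from_bytes(4), Align::from_bytes(4).unwrap());
        assert!(matches!(kind, StructKind::Prefixed(..)));
    }
}
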
#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}