1#![cfg_attr(feature = "nightly", allow(internal_features))]
3#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
4#![cfg_attr(feature = "nightly", feature(step_trait))]
5use std::fmt;
40#[cfg(feature = "nightly")]
41use std::iter::Step;
42use std::num::{NonZeroUsize, ParseIntError};
43use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
44use std::str::FromStr;
45
46use bitflags::bitflags;
47#[cfg(feature = "nightly")]
48use rustc_data_structures::stable_hasher::StableOrd;
49#[cfg(feature = "nightly")]
50use rustc_error_messages::{DiagArgValue, IntoDiagArg};
51#[cfg(feature = "nightly")]
52use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg};
53use rustc_hashes::Hash64;
54use rustc_index::{Idx, IndexSlice, IndexVec};
55#[cfg(feature = "nightly")]
56use rustc_macros::{Decodable_NoContext, Encodable_NoContext, StableHash};
57#[cfg(feature = "nightly")]
58use rustc_span::{Symbol, sym};
59
60mod callconv;
61mod canon_abi;
62mod extern_abi;
63mod layout;
64#[cfg(test)]
65mod tests;
66
67pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
68pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
69#[cfg(feature = "nightly")]
70pub use extern_abi::CVariadicStatus;
71pub use extern_abi::{ExternAbi, all_names};
72pub use layout::{FIRST_VARIANT, FieldIdx, LayoutCalculator, LayoutCalculatorError, VariantIdx};
73#[cfg(feature = "nightly")]
74pub use layout::{Layout, TyAbiInterface, TyAndLayout};
75
/// Bit set of `#[repr(..)]` properties of a type.
///
/// Individual flags and flag combinations are declared via the `bitflags!`
/// invocation below; `ReprOptions.flags` stores a value of this type.
// Derive list recovered from the expanded impls: Clone, Copy, PartialEq, Eq, Default.
#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]
pub struct ReprFlags(u8);

80impl ReprFlags {
#[allow(deprecated, non_upper_case_globals,)]
pub const IS_C: Self = Self::from_bits_retain(1 << 0);
#[allow(deprecated, non_upper_case_globals,)]
pub const IS_SIMD: Self = Self::from_bits_retain(1 << 1);
#[allow(deprecated, non_upper_case_globals,)]
pub const IS_TRANSPARENT: Self = Self::from_bits_retain(1 << 2);
#[doc = r" Internal only for now. If true, don't reorder fields."]
#[doc = r" On its own it does not prevent ABI optimizations."]
#[allow(deprecated, non_upper_case_globals,)]
pub const IS_LINEAR: Self = Self::from_bits_retain(1 << 3);
#[doc =
r" If true, the type's crate has opted into layout randomization."]
#[doc =
r" Other flags can still inhibit reordering and thus randomization."]
#[doc = r" The seed stored in `ReprOptions.field_shuffle_seed`."]
#[allow(deprecated, non_upper_case_globals,)]
pub const RANDOMIZE_LAYOUT: Self = Self::from_bits_retain(1 << 4);
#[doc =
r" If true, the type is always passed indirectly by non-Rustic ABIs."]
#[doc =
r" See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details."]
#[allow(deprecated, non_upper_case_globals,)]
pub const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS: Self =
Self::from_bits_retain(1 << 5);
#[allow(deprecated, non_upper_case_globals,)]
pub const IS_SCALABLE: Self = Self::from_bits_retain(1 << 6);
#[allow(deprecated, non_upper_case_globals,)]
pub const FIELD_ORDER_UNOPTIMIZABLE: Self =
Self::from_bits_retain(ReprFlags::IS_C.bits() |
ReprFlags::IS_SIMD.bits() | ReprFlags::IS_SCALABLE.bits() |
ReprFlags::IS_LINEAR.bits());
#[allow(deprecated, non_upper_case_globals,)]
pub const ABI_UNOPTIMIZABLE: Self =
Self::from_bits_retain(ReprFlags::IS_C.bits() |
ReprFlags::IS_SIMD.bits());
}
impl ::bitflags::Flags for ReprFlags {
const FLAGS: &'static [::bitflags::Flag<ReprFlags>] =
&[{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("IS_C", ReprFlags::IS_C)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("IS_SIMD", ReprFlags::IS_SIMD)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("IS_TRANSPARENT",
ReprFlags::IS_TRANSPARENT)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("IS_LINEAR", ReprFlags::IS_LINEAR)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("RANDOMIZE_LAYOUT",
ReprFlags::RANDOMIZE_LAYOUT)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS",
ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("IS_SCALABLE", ReprFlags::IS_SCALABLE)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("FIELD_ORDER_UNOPTIMIZABLE",
ReprFlags::FIELD_ORDER_UNOPTIMIZABLE)
},
{
#[allow(deprecated, non_upper_case_globals,)]
::bitflags::Flag::new("ABI_UNOPTIMIZABLE",
ReprFlags::ABI_UNOPTIMIZABLE)
}];
type Bits = u8;
fn bits(&self) -> u8 { ReprFlags::bits(self) }
fn from_bits_retain(bits: u8) -> ReprFlags {
ReprFlags::from_bits_retain(bits)
}
}
#[allow(dead_code, deprecated, unused_doc_comments, unused_attributes,
unused_mut, unused_imports, non_upper_case_globals, clippy ::
assign_op_pattern, clippy :: iter_without_into_iter,)]
const _: () =
{
#[allow(dead_code, deprecated, unused_attributes)]
impl ReprFlags {
#[inline]
pub const fn empty() -> Self {
Self(<u8 as ::bitflags::Bits>::EMPTY)
}
#[inline]
pub const fn all() -> Self {
let mut truncated = <u8 as ::bitflags::Bits>::EMPTY;
let mut i = 0;
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
{
{
let flag =
<ReprFlags as ::bitflags::Flags>::FLAGS[i].value().bits();
truncated = truncated | flag;
i += 1;
}
};
let _ = i;
Self(truncated)
}
#[inline]
pub const fn bits(&self) -> u8 { self.0 }
#[inline]
pub const fn from_bits(bits: u8)
-> ::bitflags::__private::core::option::Option<Self> {
let truncated = Self::from_bits_truncate(bits).0;
if truncated == bits {
::bitflags::__private::core::option::Option::Some(Self(bits))
} else { ::bitflags::__private::core::option::Option::None }
}
#[inline]
pub const fn from_bits_truncate(bits: u8) -> Self {
Self(bits & Self::all().0)
}
#[inline]
pub const fn from_bits_retain(bits: u8) -> Self { Self(bits) }
#[inline]
pub fn from_name(name: &str)
-> ::bitflags::__private::core::option::Option<Self> {
{
if name == "IS_C" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_C.bits()));
}
};
;
{
if name == "IS_SIMD" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SIMD.bits()));
}
};
;
{
if name == "IS_TRANSPARENT" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_TRANSPARENT.bits()));
}
};
;
{
if name == "IS_LINEAR" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_LINEAR.bits()));
}
};
;
{
if name == "RANDOMIZE_LAYOUT" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::RANDOMIZE_LAYOUT.bits()));
}
};
;
{
if name == "PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS.bits()));
}
};
;
{
if name == "IS_SCALABLE" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::IS_SCALABLE.bits()));
}
};
;
{
if name == "FIELD_ORDER_UNOPTIMIZABLE" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE.bits()));
}
};
;
{
if name == "ABI_UNOPTIMIZABLE" {
return ::bitflags::__private::core::option::Option::Some(Self(ReprFlags::ABI_UNOPTIMIZABLE.bits()));
}
};
;
let _ = name;
::bitflags::__private::core::option::Option::None
}
#[inline]
pub const fn is_empty(&self) -> bool {
self.0 == <u8 as ::bitflags::Bits>::EMPTY
}
#[inline]
pub const fn is_all(&self) -> bool {
Self::all().0 | self.0 == self.0
}
#[inline]
pub const fn intersects(&self, other: Self) -> bool {
self.0 & other.0 != <u8 as ::bitflags::Bits>::EMPTY
}
#[inline]
pub const fn contains(&self, other: Self) -> bool {
self.0 & other.0 == other.0
}
#[inline]
pub fn insert(&mut self, other: Self) {
*self = Self(self.0).union(other);
}
#[inline]
pub fn remove(&mut self, other: Self) {
*self = Self(self.0).difference(other);
}
#[inline]
pub fn toggle(&mut self, other: Self) {
*self = Self(self.0).symmetric_difference(other);
}
#[inline]
pub fn set(&mut self, other: Self, value: bool) {
if value { self.insert(other); } else { self.remove(other); }
}
#[inline]
#[must_use]
pub const fn intersection(self, other: Self) -> Self {
Self(self.0 & other.0)
}
#[inline]
#[must_use]
pub const fn union(self, other: Self) -> Self {
Self(self.0 | other.0)
}
#[inline]
#[must_use]
pub const fn difference(self, other: Self) -> Self {
Self(self.0 & !other.0)
}
#[inline]
#[must_use]
pub const fn symmetric_difference(self, other: Self) -> Self {
Self(self.0 ^ other.0)
}
#[inline]
#[must_use]
pub const fn complement(self) -> Self {
Self::from_bits_truncate(!self.0)
}
}
impl ::bitflags::__private::core::fmt::Binary for ReprFlags {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::Binary::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::fmt::Octal for ReprFlags {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::Octal::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::fmt::LowerHex for ReprFlags {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::LowerHex::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::fmt::UpperHex for ReprFlags {
fn fmt(&self, f: &mut ::bitflags::__private::core::fmt::Formatter)
-> ::bitflags::__private::core::fmt::Result {
let inner = self.0;
::bitflags::__private::core::fmt::UpperHex::fmt(&inner, f)
}
}
impl ::bitflags::__private::core::ops::BitOr for ReprFlags {
type Output = Self;
#[inline]
fn bitor(self, other: ReprFlags) -> Self { self.union(other) }
}
impl ::bitflags::__private::core::ops::BitOrAssign for ReprFlags {
#[inline]
fn bitor_assign(&mut self, other: Self) { self.insert(other); }
}
impl ::bitflags::__private::core::ops::BitXor for ReprFlags {
type Output = Self;
#[inline]
fn bitxor(self, other: Self) -> Self {
self.symmetric_difference(other)
}
}
impl ::bitflags::__private::core::ops::BitXorAssign for ReprFlags {
#[inline]
fn bitxor_assign(&mut self, other: Self) { self.toggle(other); }
}
impl ::bitflags::__private::core::ops::BitAnd for ReprFlags {
type Output = Self;
#[inline]
fn bitand(self, other: Self) -> Self { self.intersection(other) }
}
impl ::bitflags::__private::core::ops::BitAndAssign for ReprFlags {
#[inline]
fn bitand_assign(&mut self, other: Self) {
*self =
Self::from_bits_retain(self.bits()).intersection(other);
}
}
impl ::bitflags::__private::core::ops::Sub for ReprFlags {
type Output = Self;
#[inline]
fn sub(self, other: Self) -> Self { self.difference(other) }
}
impl ::bitflags::__private::core::ops::SubAssign for ReprFlags {
#[inline]
fn sub_assign(&mut self, other: Self) { self.remove(other); }
}
impl ::bitflags::__private::core::ops::Not for ReprFlags {
type Output = Self;
#[inline]
fn not(self) -> Self { self.complement() }
}
impl ::bitflags::__private::core::iter::Extend<ReprFlags> for
ReprFlags {
fn extend<T: ::bitflags::__private::core::iter::IntoIterator<Item
= Self>>(&mut self, iterator: T) {
for item in iterator { self.insert(item) }
}
}
impl ::bitflags::__private::core::iter::FromIterator<ReprFlags> for
ReprFlags {
fn from_iter<T: ::bitflags::__private::core::iter::IntoIterator<Item
= Self>>(iterator: T) -> Self {
use ::bitflags::__private::core::iter::Extend;
let mut result = Self::empty();
result.extend(iterator);
result
}
}
impl ReprFlags {
#[inline]
pub const fn iter(&self) -> ::bitflags::iter::Iter<ReprFlags> {
::bitflags::iter::Iter::__private_const_new(<ReprFlags as
::bitflags::Flags>::FLAGS,
ReprFlags::from_bits_retain(self.bits()),
ReprFlags::from_bits_retain(self.bits()))
}
#[inline]
pub const fn iter_names(&self)
-> ::bitflags::iter::IterNames<ReprFlags> {
::bitflags::iter::IterNames::__private_const_new(<ReprFlags as
::bitflags::Flags>::FLAGS,
ReprFlags::from_bits_retain(self.bits()),
ReprFlags::from_bits_retain(self.bits()))
}
}
impl ::bitflags::__private::core::iter::IntoIterator for ReprFlags {
type Item = ReprFlags;
type IntoIter = ::bitflags::iter::Iter<ReprFlags>;
fn into_iter(self) -> Self::IntoIter { self.iter() }
}
};bitflags! {
81 impl ReprFlags: u8 {
82 const IS_C = 1 << 0;
83 const IS_SIMD = 1 << 1;
84 const IS_TRANSPARENT = 1 << 2;
85 const IS_LINEAR = 1 << 3;
88 const RANDOMIZE_LAYOUT = 1 << 4;
92 const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;
95 const IS_SCALABLE = 1 << 6;
96 const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()
98 | ReprFlags::IS_SIMD.bits()
99 | ReprFlags::IS_SCALABLE.bits()
100 | ReprFlags::IS_LINEAR.bits();
101 const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
102 }
103}
104
105impl std::fmt::Debug for ReprFlags {
108 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
109 bitflags::parser::to_writer(self, f)
110 }
111}
112
113#[derive(#[automatically_derived]
impl ::core::marker::Copy for IntegerType { }Copy, #[automatically_derived]
impl ::core::clone::Clone for IntegerType {
#[inline]
fn clone(&self) -> IntegerType {
let _: ::core::clone::AssertParamIsClone<bool>;
let _: ::core::clone::AssertParamIsClone<Integer>;
*self
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for IntegerType {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
IntegerType::Pointer(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"Pointer", &__self_0),
IntegerType::Fixed(__self_0, __self_1) =>
::core::fmt::Formatter::debug_tuple_field2_finish(f, "Fixed",
__self_0, &__self_1),
}
}
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for IntegerType {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<bool>;
let _: ::core::cmp::AssertParamIsEq<Integer>;
}
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for IntegerType {
#[inline]
fn eq(&self, other: &IntegerType) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(IntegerType::Pointer(__self_0),
IntegerType::Pointer(__arg1_0)) => __self_0 == __arg1_0,
(IntegerType::Fixed(__self_0, __self_1),
IntegerType::Fixed(__arg1_0, __arg1_1)) =>
__self_1 == __arg1_1 && __self_0 == __arg1_0,
_ => unsafe { ::core::intrinsics::unreachable() }
}
}
}PartialEq)]
114#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl<__E: ::rustc_serialize::Encoder>
::rustc_serialize::Encodable<__E> for IntegerType {
fn encode(&self, __encoder: &mut __E) {
let disc =
match *self {
IntegerType::Pointer(ref __binding_0) => { 0usize }
IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
1usize
}
};
::rustc_serialize::Encoder::emit_u8(__encoder, disc as u8);
match *self {
IntegerType::Pointer(ref __binding_0) => {
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
}
IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_1,
__encoder);
}
}
}
}
};Encodable_NoContext, const _: () =
{
impl<__D: ::rustc_serialize::Decoder>
::rustc_serialize::Decodable<__D> for IntegerType {
fn decode(__decoder: &mut __D) -> Self {
match ::rustc_serialize::Decoder::read_u8(__decoder) as usize
{
0usize => {
IntegerType::Pointer(::rustc_serialize::Decodable::decode(__decoder))
}
1usize => {
IntegerType::Fixed(::rustc_serialize::Decodable::decode(__decoder),
::rustc_serialize::Decodable::decode(__decoder))
}
n => {
::core::panicking::panic_fmt(format_args!("invalid enum variant tag while decoding `IntegerType`, expected 0..2, actual {0}",
n));
}
}
}
}
};Decodable_NoContext, const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for
IntegerType {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
IntegerType::Pointer(ref __binding_0) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
IntegerType::Fixed(ref __binding_0, ref __binding_1) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
115pub enum IntegerType {
116 Pointer(bool),
119 Fixed(Integer, bool),
122}
123
124impl IntegerType {
125 pub fn is_signed(&self) -> bool {
126 match self {
127 IntegerType::Pointer(b) => *b,
128 IntegerType::Fixed(_, b) => *b,
129 }
130 }
131}
132
/// How the element portion of a scalable-vector `repr` was specified.
// Derive list recovered from the expanded impls: Copy, Clone, Debug, Eq, PartialEq.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]
pub enum ScalableElt {
    /// An explicit element count for the scalable vector.
    // NOTE(review): assumed to be the per-granule lane count — confirm against the layout code.
    ElementCount(u16),
    /// No element count; the type is a container of scalable vectors.
    // NOTE(review): semantics inferred from the variant name — verify with callers.
    Container,
}

143#[derive(#[automatically_derived]
impl ::core::marker::Copy for ReprOptions { }Copy, #[automatically_derived]
impl ::core::clone::Clone for ReprOptions {
#[inline]
fn clone(&self) -> ReprOptions {
let _: ::core::clone::AssertParamIsClone<Option<IntegerType>>;
let _: ::core::clone::AssertParamIsClone<Option<Align>>;
let _: ::core::clone::AssertParamIsClone<Option<Align>>;
let _: ::core::clone::AssertParamIsClone<ReprFlags>;
let _: ::core::clone::AssertParamIsClone<Option<ScalableElt>>;
let _: ::core::clone::AssertParamIsClone<Hash64>;
*self
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for ReprOptions {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
let names: &'static _ =
&["int", "align", "pack", "flags", "scalable",
"field_shuffle_seed"];
let values: &[&dyn ::core::fmt::Debug] =
&[&self.int, &self.align, &self.pack, &self.flags, &self.scalable,
&&self.field_shuffle_seed];
::core::fmt::Formatter::debug_struct_fields_finish(f, "ReprOptions",
names, values)
}
}Debug, #[automatically_derived]
impl ::core::cmp::Eq for ReprOptions {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Option<IntegerType>>;
let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
let _: ::core::cmp::AssertParamIsEq<ReprFlags>;
let _: ::core::cmp::AssertParamIsEq<Option<ScalableElt>>;
let _: ::core::cmp::AssertParamIsEq<Hash64>;
}
}Eq, #[automatically_derived]
impl ::core::cmp::PartialEq for ReprOptions {
#[inline]
fn eq(&self, other: &ReprOptions) -> bool {
self.int == other.int && self.align == other.align &&
self.pack == other.pack && self.flags == other.flags &&
self.scalable == other.scalable &&
self.field_shuffle_seed == other.field_shuffle_seed
}
}PartialEq, #[automatically_derived]
impl ::core::default::Default for ReprOptions {
#[inline]
fn default() -> ReprOptions {
ReprOptions {
int: ::core::default::Default::default(),
align: ::core::default::Default::default(),
pack: ::core::default::Default::default(),
flags: ::core::default::Default::default(),
scalable: ::core::default::Default::default(),
field_shuffle_seed: ::core::default::Default::default(),
}
}
}Default)]
145#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl<__E: ::rustc_serialize::Encoder>
::rustc_serialize::Encodable<__E> for ReprOptions {
fn encode(&self, __encoder: &mut __E) {
match *self {
ReprOptions {
int: ref __binding_0,
align: ref __binding_1,
pack: ref __binding_2,
flags: ref __binding_3,
scalable: ref __binding_4,
field_shuffle_seed: ref __binding_5 } => {
::rustc_serialize::Encodable::<__E>::encode(__binding_0,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_1,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_2,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_3,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_4,
__encoder);
::rustc_serialize::Encodable::<__E>::encode(__binding_5,
__encoder);
}
}
}
}
};Encodable_NoContext, const _: () =
{
impl<__D: ::rustc_serialize::Decoder>
::rustc_serialize::Decodable<__D> for ReprOptions {
fn decode(__decoder: &mut __D) -> Self {
ReprOptions {
int: ::rustc_serialize::Decodable::decode(__decoder),
align: ::rustc_serialize::Decodable::decode(__decoder),
pack: ::rustc_serialize::Decodable::decode(__decoder),
flags: ::rustc_serialize::Decodable::decode(__decoder),
scalable: ::rustc_serialize::Decodable::decode(__decoder),
field_shuffle_seed: ::rustc_serialize::Decodable::decode(__decoder),
}
}
}
};Decodable_NoContext, const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for
ReprOptions {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
ReprOptions {
int: ref __binding_0,
align: ref __binding_1,
pack: ref __binding_2,
flags: ref __binding_3,
scalable: ref __binding_4,
field_shuffle_seed: ref __binding_5 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
{ __binding_2.stable_hash(__hcx, __hasher); }
{ __binding_3.stable_hash(__hcx, __hasher); }
{ __binding_4.stable_hash(__hcx, __hasher); }
{ __binding_5.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
146pub struct ReprOptions {
147 pub int: Option<IntegerType>,
148 pub align: Option<Align>,
149 pub pack: Option<Align>,
150 pub flags: ReprFlags,
151 pub scalable: Option<ScalableElt>,
153 pub field_shuffle_seed: Hash64,
161}
162
163impl ReprOptions {
164 #[inline]
165 pub fn simd(&self) -> bool {
166 self.flags.contains(ReprFlags::IS_SIMD)
167 }
168
169 #[inline]
170 pub fn scalable(&self) -> bool {
171 self.flags.contains(ReprFlags::IS_SCALABLE)
172 }
173
174 #[inline]
175 pub fn c(&self) -> bool {
176 self.flags.contains(ReprFlags::IS_C)
177 }
178
179 #[inline]
180 pub fn packed(&self) -> bool {
181 self.pack.is_some()
182 }
183
184 #[inline]
185 pub fn transparent(&self) -> bool {
186 self.flags.contains(ReprFlags::IS_TRANSPARENT)
187 }
188
189 #[inline]
190 pub fn linear(&self) -> bool {
191 self.flags.contains(ReprFlags::IS_LINEAR)
192 }
193
194 pub fn discr_type(&self) -> IntegerType {
202 self.int.unwrap_or(IntegerType::Pointer(true))
203 }
204
205 pub fn inhibit_enum_layout_opt(&self) -> bool {
209 self.c() || self.int.is_some()
210 }
211
212 pub fn inhibit_newtype_abi_optimization(&self) -> bool {
213 self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
214 }
215
216 pub fn inhibit_struct_field_reordering(&self) -> bool {
219 self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
220 }
221
222 pub fn can_randomize_type_layout(&self) -> bool {
225 !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
226 }
227
228 pub fn inhibits_union_abi_opt(&self) -> bool {
230 self.c()
231 }
232}
233
/// The maximum supported number of lanes in a SIMD vector (`2^15 = 32768`).
// NOTE(review): the specific bound presumably reflects backend limits — confirm
// against the SIMD layout/validation code that consumes it.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

241#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointerSpec { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointerSpec {
#[inline]
fn clone(&self) -> PointerSpec {
let _: ::core::clone::AssertParamIsClone<Size>;
let _: ::core::clone::AssertParamIsClone<Align>;
let _: ::core::clone::AssertParamIsClone<bool>;
*self
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointerSpec {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field4_finish(f, "PointerSpec",
"pointer_size", &self.pointer_size, "pointer_align",
&self.pointer_align, "pointer_offset", &self.pointer_offset,
"_is_fat", &&self._is_fat)
}
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for PointerSpec {
#[inline]
fn eq(&self, other: &PointerSpec) -> bool {
self._is_fat == other._is_fat &&
self.pointer_size == other.pointer_size &&
self.pointer_align == other.pointer_align &&
self.pointer_offset == other.pointer_offset
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for PointerSpec {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Size>;
let _: ::core::cmp::AssertParamIsEq<Align>;
let _: ::core::cmp::AssertParamIsEq<bool>;
}
}Eq)]
243pub struct PointerSpec {
244 pointer_size: Size,
246 pointer_align: Align,
248 pointer_offset: Size,
250 _is_fat: bool,
253}
254
255#[derive(#[automatically_derived]
impl ::core::fmt::Debug for TargetDataLayout {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
let names: &'static _ =
&["endian", "i1_align", "i8_align", "i16_align", "i32_align",
"i64_align", "i128_align", "f16_align", "f32_align",
"f64_align", "f128_align", "aggregate_align",
"vector_align", "default_address_space",
"default_address_space_pointer_spec", "address_space_info",
"instruction_address_space", "c_enum_min_size"];
let values: &[&dyn ::core::fmt::Debug] =
&[&self.endian, &self.i1_align, &self.i8_align, &self.i16_align,
&self.i32_align, &self.i64_align, &self.i128_align,
&self.f16_align, &self.f32_align, &self.f64_align,
&self.f128_align, &self.aggregate_align, &self.vector_align,
&self.default_address_space,
&self.default_address_space_pointer_spec,
&self.address_space_info, &self.instruction_address_space,
&&self.c_enum_min_size];
::core::fmt::Formatter::debug_struct_fields_finish(f,
"TargetDataLayout", names, values)
}
}Debug, #[automatically_derived]
impl ::core::cmp::PartialEq for TargetDataLayout {
#[inline]
fn eq(&self, other: &TargetDataLayout) -> bool {
self.endian == other.endian && self.i1_align == other.i1_align &&
self.i8_align == other.i8_align &&
self.i16_align == other.i16_align &&
self.i32_align == other.i32_align &&
self.i64_align == other.i64_align &&
self.i128_align == other.i128_align &&
self.f16_align == other.f16_align &&
self.f32_align == other.f32_align &&
self.f64_align == other.f64_align &&
self.f128_align == other.f128_align &&
self.aggregate_align == other.aggregate_align &&
self.vector_align == other.vector_align &&
self.default_address_space == other.default_address_space &&
self.default_address_space_pointer_spec ==
other.default_address_space_pointer_spec &&
self.address_space_info == other.address_space_info &&
self.instruction_address_space ==
other.instruction_address_space &&
self.c_enum_min_size == other.c_enum_min_size
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for TargetDataLayout {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Endian>;
let _: ::core::cmp::AssertParamIsEq<Align>;
let _: ::core::cmp::AssertParamIsEq<Vec<(Size, Align)>>;
let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
let _: ::core::cmp::AssertParamIsEq<PointerSpec>;
let _: ::core::cmp::AssertParamIsEq<Vec<(AddressSpace, PointerSpec)>>;
let _: ::core::cmp::AssertParamIsEq<Integer>;
}
}Eq)]
258pub struct TargetDataLayout {
259 pub endian: Endian,
260 pub i1_align: Align,
261 pub i8_align: Align,
262 pub i16_align: Align,
263 pub i32_align: Align,
264 pub i64_align: Align,
265 pub i128_align: Align,
266 pub f16_align: Align,
267 pub f32_align: Align,
268 pub f64_align: Align,
269 pub f128_align: Align,
270 pub aggregate_align: Align,
271
272 pub vector_align: Vec<(Size, Align)>,
274
275 pub default_address_space: AddressSpace,
276 pub default_address_space_pointer_spec: PointerSpec,
277
278 address_space_info: Vec<(AddressSpace, PointerSpec)>,
285
286 pub instruction_address_space: AddressSpace,
287
288 pub c_enum_min_size: Integer,
292}
293
294impl Default for TargetDataLayout {
295 fn default() -> TargetDataLayout {
297 let align = |bits| Align::from_bits(bits).unwrap();
298 TargetDataLayout {
299 endian: Endian::Big,
300 i1_align: align(8),
301 i8_align: align(8),
302 i16_align: align(16),
303 i32_align: align(32),
304 i64_align: align(32),
305 i128_align: align(32),
306 f16_align: align(16),
307 f32_align: align(32),
308 f64_align: align(64),
309 f128_align: align(128),
310 aggregate_align: align(8),
311 vector_align: ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[(Size::from_bits(64), align(64)),
(Size::from_bits(128), align(128))]))vec![
312 (Size::from_bits(64), align(64)),
313 (Size::from_bits(128), align(128)),
314 ],
315 default_address_space: AddressSpace::ZERO,
316 default_address_space_pointer_spec: PointerSpec {
317 pointer_size: Size::from_bits(64),
318 pointer_align: align(64),
319 pointer_offset: Size::from_bits(64),
320 _is_fat: false,
321 },
322 address_space_info: ::alloc::vec::Vec::new()vec![],
323 instruction_address_space: AddressSpace::ZERO,
324 c_enum_min_size: Integer::I32,
325 }
326 }
327}
328
329pub enum TargetDataLayoutError<'a> {
330 InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
331 InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
332 MissingAlignment { cause: &'a str },
333 InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
334 InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
335 InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
336 InvalidBitsSize { err: String },
337 UnknownPointerSpecification { err: String },
338}
339
#[cfg(feature = "nightly")]
impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetDataLayoutError<'_> {
    /// Converts each parse error into a rustc diagnostic; every borrowed field
    /// is attached as a Fluent-style argument of the message.
    fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
        match self {
            TargetDataLayoutError::InvalidAddressSpace { addr_space, err, cause } => {
                Diag::new(dcx, level, msg!("invalid address space `{$addr_space}` for `{$cause}` in \"data-layout\": {$err}"))
                    .with_arg("addr_space", addr_space)
                    .with_arg("cause", cause)
                    .with_arg("err", err)
            }
            TargetDataLayoutError::InvalidBits { kind, bit, cause, err } => {
                Diag::new(dcx, level, msg!("invalid {$kind} `{$bit}` for `{$cause}` in \"data-layout\": {$err}"))
                    .with_arg("kind", kind)
                    .with_arg("bit", bit)
                    .with_arg("cause", cause)
                    .with_arg("err", err)
            }
            TargetDataLayoutError::MissingAlignment { cause } => {
                Diag::new(dcx, level, msg!("missing alignment for `{$cause}` in \"data-layout\""))
                    .with_arg("cause", cause)
            }
            TargetDataLayoutError::InvalidAlignment { cause, err } => {
                // `err` has no `IntoDiagArg` impl here, so stringify it.
                Diag::new(dcx, level, msg!("invalid alignment for `{$cause}` in \"data-layout\": {$err}"))
                    .with_arg("cause", cause)
                    .with_arg("err", err.to_string())
            }
            TargetDataLayoutError::InconsistentTargetArchitecture { dl, target } => {
                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims architecture is {$dl}-endian, while \"target-endian\" is `{$target}`"))
                    .with_arg("dl", dl).with_arg("target", target)
            }
            TargetDataLayoutError::InconsistentTargetPointerWidth { pointer_size, target } => {
                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims pointers are {$pointer_size}-bit, while \"target-pointer-width\" is `{$target}`"))
                    .with_arg("pointer_size", pointer_size).with_arg("target", target)
            }
            TargetDataLayoutError::InvalidBitsSize { err } => {
                Diag::new(dcx, level, msg!("{$err}")).with_arg("err", err)
            }
            TargetDataLayoutError::UnknownPointerSpecification { err } => {
                Diag::new(dcx, level, msg!("unknown pointer specification `{$err}` in datalayout string"))
                    .with_arg("err", err)
            }
        }
    }
}
384
385impl TargetDataLayout {
386 pub fn parse_from_llvm_datalayout_string<'a>(
392 input: &'a str,
393 default_address_space: AddressSpace,
394 ) -> Result<TargetDataLayout, TargetDataLayoutError<'a>> {
395 let parse_address_space = |s: &'a str, cause: &'a str| {
397 s.parse::<u32>().map(AddressSpace).map_err(|err| {
398 TargetDataLayoutError::InvalidAddressSpace { addr_space: s, cause, err }
399 })
400 };
401
402 let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
404 s.parse::<u64>().map_err(|err| TargetDataLayoutError::InvalidBits {
405 kind,
406 bit: s,
407 cause,
408 err,
409 })
410 };
411
412 let parse_size =
414 |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
415
416 let parse_align_str = |s: &'a str, cause: &'a str| {
418 let align_from_bits = |bits| {
419 Align::from_bits(bits)
420 .map_err(|err| TargetDataLayoutError::InvalidAlignment { cause, err })
421 };
422 let abi = parse_bits(s, "alignment", cause)?;
423 Ok(align_from_bits(abi)?)
424 };
425
426 let parse_align_seq = |s: &[&'a str], cause: &'a str| {
429 if s.is_empty() {
430 return Err(TargetDataLayoutError::MissingAlignment { cause });
431 }
432 parse_align_str(s[0], cause)
433 };
434
435 let mut dl = TargetDataLayout::default();
436 dl.default_address_space = default_address_space;
437
438 let mut i128_align_src = 64;
439 for spec in input.split('-') {
440 let spec_parts = spec.split(':').collect::<Vec<_>>();
441
442 match &*spec_parts {
443 ["e"] => dl.endian = Endian::Little,
444 ["E"] => dl.endian = Endian::Big,
445 [p] if p.starts_with('P') => {
446 dl.instruction_address_space = parse_address_space(&p[1..], "P")?
447 }
448 ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
449 ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
450 ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
451 ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
452 ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
453 [p, s, a @ ..] if p.starts_with("p") => {
454 let mut p = p.strip_prefix('p').unwrap();
455 let mut _is_fat = false;
456
457 if p.starts_with('f') {
461 p = p.strip_prefix('f').unwrap();
462 _is_fat = true;
463 }
464
465 if p.starts_with(char::is_alphabetic) {
468 return Err(TargetDataLayoutError::UnknownPointerSpecification {
469 err: p.to_string(),
470 });
471 }
472
473 let addr_space = if !p.is_empty() {
474 parse_address_space(p, "p-")?
475 } else {
476 AddressSpace::ZERO
477 };
478
479 let pointer_size = parse_size(s, "p-")?;
480 let pointer_align = parse_align_seq(a, "p-")?;
481 let info = PointerSpec {
482 pointer_offset: pointer_size,
483 pointer_size,
484 pointer_align,
485 _is_fat,
486 };
487 if addr_space == default_address_space {
488 dl.default_address_space_pointer_spec = info;
489 } else {
490 match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
491 Some(e) => e.1 = info,
492 None => {
493 dl.address_space_info.push((addr_space, info));
494 }
495 }
496 }
497 }
498 [p, s, a, _pr, i] if p.starts_with("p") => {
499 let mut p = p.strip_prefix('p').unwrap();
500 let mut _is_fat = false;
501
502 if p.starts_with('f') {
506 p = p.strip_prefix('f').unwrap();
507 _is_fat = true;
508 }
509
510 if p.starts_with(char::is_alphabetic) {
513 return Err(TargetDataLayoutError::UnknownPointerSpecification {
514 err: p.to_string(),
515 });
516 }
517
518 let addr_space = if !p.is_empty() {
519 parse_address_space(p, "p")?
520 } else {
521 AddressSpace::ZERO
522 };
523
524 let info = PointerSpec {
525 pointer_size: parse_size(s, "p-")?,
526 pointer_align: parse_align_str(a, "p-")?,
527 pointer_offset: parse_size(i, "p-")?,
528 _is_fat,
529 };
530
531 if addr_space == default_address_space {
532 dl.default_address_space_pointer_spec = info;
533 } else {
534 match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
535 Some(e) => e.1 = info,
536 None => {
537 dl.address_space_info.push((addr_space, info));
538 }
539 }
540 }
541 }
542
543 [s, a @ ..] if s.starts_with('i') => {
544 let Ok(bits) = s[1..].parse::<u64>() else {
545 parse_size(&s[1..], "i")?; continue;
547 };
548 let a = parse_align_seq(a, s)?;
549 match bits {
550 1 => dl.i1_align = a,
551 8 => dl.i8_align = a,
552 16 => dl.i16_align = a,
553 32 => dl.i32_align = a,
554 64 => dl.i64_align = a,
555 _ => {}
556 }
557 if bits >= i128_align_src && bits <= 128 {
558 i128_align_src = bits;
561 dl.i128_align = a;
562 }
563 }
564 [s, a @ ..] if s.starts_with('v') => {
565 let v_size = parse_size(&s[1..], "v")?;
566 let a = parse_align_seq(a, s)?;
567 if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
568 v.1 = a;
569 continue;
570 }
571 dl.vector_align.push((v_size, a));
573 }
574 _ => {} }
576 }
577
578 if (dl.instruction_address_space != dl.default_address_space)
581 && dl
582 .address_space_info
583 .iter()
584 .find(|(a, _)| *a == dl.instruction_address_space)
585 .is_none()
586 {
587 dl.address_space_info.push((
588 dl.instruction_address_space,
589 dl.default_address_space_pointer_spec.clone(),
590 ));
591 }
592
593 Ok(dl)
594 }
595
596 #[inline]
607 pub fn obj_size_bound(&self) -> u64 {
608 match self.pointer_size().bits() {
609 16 => 1 << 15,
610 32 => 1 << 31,
611 64 => 1 << 61,
612 bits => {
::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
613 }
614 }
615
616 #[inline]
626 pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
627 match self.pointer_size_in(address_space).bits() {
628 16 => 1 << 15,
629 32 => 1 << 31,
630 64 => 1 << 61,
631 bits => {
::core::panicking::panic_fmt(format_args!("obj_size_bound: unknown pointer bit size {0}",
bits));
}panic!("obj_size_bound: unknown pointer bit size {bits}"),
632 }
633 }
634
635 #[inline]
636 pub fn ptr_sized_integer(&self) -> Integer {
637 use Integer::*;
638 match self.pointer_offset().bits() {
639 16 => I16,
640 32 => I32,
641 64 => I64,
642 bits => {
::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
643 }
644 }
645
646 #[inline]
647 pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
648 use Integer::*;
649 match self.pointer_offset_in(address_space).bits() {
650 16 => I16,
651 32 => I32,
652 64 => I64,
653 bits => {
::core::panicking::panic_fmt(format_args!("ptr_sized_integer: unknown pointer bit size {0}",
bits));
}panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
654 }
655 }
656
657 #[inline]
659 fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
660 self.vector_align
661 .iter()
662 .find(|(size, _align)| *size == vec_size)
663 .map(|(_size, align)| *align)
664 }
665
666 #[inline]
668 pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
669 self.cabi_vector_align(vec_size)
670 .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
671 }
672
673 #[inline]
675 pub fn pointer_size(&self) -> Size {
676 self.default_address_space_pointer_spec.pointer_size
677 }
678
679 #[inline]
681 pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
682 if c == self.default_address_space {
683 return self.default_address_space_pointer_spec.pointer_size;
684 }
685
686 if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
687 e.1.pointer_size
688 } else {
689 {
::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
c));
};panic!("Use of unknown address space {c:?}");
690 }
691 }
692
693 #[inline]
695 pub fn pointer_offset(&self) -> Size {
696 self.default_address_space_pointer_spec.pointer_offset
697 }
698
699 #[inline]
701 pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
702 if c == self.default_address_space {
703 return self.default_address_space_pointer_spec.pointer_offset;
704 }
705
706 if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
707 e.1.pointer_offset
708 } else {
709 {
::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
c));
};panic!("Use of unknown address space {c:?}");
710 }
711 }
712
713 #[inline]
715 pub fn pointer_align(&self) -> AbiAlign {
716 AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
717 }
718
719 #[inline]
721 pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
722 AbiAlign::new(if c == self.default_address_space {
723 self.default_address_space_pointer_spec.pointer_align
724 } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
725 e.1.pointer_align
726 } else {
727 {
::core::panicking::panic_fmt(format_args!("Use of unknown address space {0:?}",
c));
};panic!("Use of unknown address space {c:?}");
728 })
729 }
730}
731
732pub trait HasDataLayout {
733 fn data_layout(&self) -> &TargetDataLayout;
734}
735
736impl HasDataLayout for TargetDataLayout {
737 #[inline]
738 fn data_layout(&self) -> &TargetDataLayout {
739 self
740 }
741}
742
743impl HasDataLayout for &TargetDataLayout {
745 #[inline]
746 fn data_layout(&self) -> &TargetDataLayout {
747 (**self).data_layout()
748 }
749}
750
/// Endianness of the target.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    /// Lower-case human-readable name, matching the `"target-endian"` values.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }

    /// Interned-symbol form of [`Self::as_str`] for use inside rustc.
    #[cfg(feature = "nightly")]
    pub fn desc_symbol(&self) -> Symbol {
        match self {
            Self::Little => sym::little,
            Self::Big => sym::big,
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}
792
/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]
pub struct Size {
    // Byte count; kept private so construction goes through `from_bits`/`from_bytes`.
    raw: u64,
}
799
#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` on `Size` compares the raw byte count (`u64`), which does not
    // depend on interning or session state, so the ordering is stable.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}
808
809impl fmt::Debug for Size {
811 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
812 f.write_fmt(format_args!("Size({0} bytes)", self.bytes()))write!(f, "Size({} bytes)", self.bytes())
813 }
814}
815
816impl Size {
817 pub const ZERO: Size = Size { raw: 0 };
818
819 pub fn from_bits(bits: impl TryInto<u64>) -> Size {
822 let bits = bits.try_into().ok().unwrap();
823 Size { raw: bits.div_ceil(8) }
824 }
825
826 #[inline]
827 pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
828 let bytes: u64 = bytes.try_into().ok().unwrap();
829 Size { raw: bytes }
830 }
831
832 #[inline]
833 pub fn bytes(self) -> u64 {
834 self.raw
835 }
836
837 #[inline]
838 pub fn bytes_usize(self) -> usize {
839 self.bytes().try_into().unwrap()
840 }
841
842 #[inline]
843 pub fn bits(self) -> u64 {
844 #[cold]
845 fn overflow(bytes: u64) -> ! {
846 {
::core::panicking::panic_fmt(format_args!("Size::bits: {0} bytes in bits doesn\'t fit in u64",
bytes));
}panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
847 }
848
849 self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
850 }
851
852 #[inline]
853 pub fn bits_usize(self) -> usize {
854 self.bits().try_into().unwrap()
855 }
856
857 #[inline]
858 pub fn align_to(self, align: Align) -> Size {
859 let mask = align.bytes() - 1;
860 Size::from_bytes((self.bytes() + mask) & !mask)
861 }
862
863 #[inline]
864 pub fn is_aligned(self, align: Align) -> bool {
865 let mask = align.bytes() - 1;
866 self.bytes() & mask == 0
867 }
868
869 #[inline]
870 pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
871 let dl = cx.data_layout();
872
873 let bytes = self.bytes().checked_add(offset.bytes())?;
874
875 if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
876 }
877
878 #[inline]
879 pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
880 let dl = cx.data_layout();
881
882 let bytes = self.bytes().checked_mul(count)?;
883 if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
884 }
885
886 #[inline]
889 pub fn sign_extend(self, value: u128) -> i128 {
890 let size = self.bits();
891 if size == 0 {
892 return 0;
894 }
895 let shift = 128 - size;
897 ((value << shift) as i128) >> shift
900 }
901
902 #[inline]
904 pub fn truncate(self, value: u128) -> u128 {
905 let size = self.bits();
906 if size == 0 {
907 return 0;
909 }
910 let shift = 128 - size;
911 (value << shift) >> shift
913 }
914
915 #[inline]
916 pub fn signed_int_min(&self) -> i128 {
917 self.sign_extend(1_u128 << (self.bits() - 1))
918 }
919
920 #[inline]
921 pub fn signed_int_max(&self) -> i128 {
922 i128::MAX >> (128 - self.bits())
923 }
924
925 #[inline]
926 pub fn unsigned_int_max(&self) -> u128 {
927 u128::MAX >> (128 - self.bits())
928 }
929}
930
931impl Add for Size {
935 type Output = Size;
936 #[inline]
937 fn add(self, other: Size) -> Size {
938 Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
939 {
::core::panicking::panic_fmt(format_args!("Size::add: {0} + {1} doesn\'t fit in u64",
self.bytes(), other.bytes()));
}panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
940 }))
941 }
942}
943
944impl Sub for Size {
945 type Output = Size;
946 #[inline]
947 fn sub(self, other: Size) -> Size {
948 Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
949 {
::core::panicking::panic_fmt(format_args!("Size::sub: {0} - {1} would result in negative size",
self.bytes(), other.bytes()));
}panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
950 }))
951 }
952}
953
954impl Mul<Size> for u64 {
955 type Output = Size;
956 #[inline]
957 fn mul(self, size: Size) -> Size {
958 size * self
959 }
960}
961
962impl Mul<u64> for Size {
963 type Output = Size;
964 #[inline]
965 fn mul(self, count: u64) -> Size {
966 match self.bytes().checked_mul(count) {
967 Some(bytes) => Size::from_bytes(bytes),
968 None => {
::core::panicking::panic_fmt(format_args!("Size::mul: {0} * {1} doesn\'t fit in u64",
self.bytes(), count));
}panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
969 }
970 }
971}
972
973impl AddAssign for Size {
974 #[inline]
975 fn add_assign(&mut self, other: Size) {
976 *self = *self + other;
977 }
978}
979
// Forward every `Step` operation to the underlying byte count so `Size`
// can be used in ranges (nightly-only, since `Step` is unstable).
#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: caller upholds `u64::forward_unchecked`'s no-overflow contract.
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        // SAFETY: caller upholds `u64::backward_unchecked`'s no-underflow contract.
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}
1017
/// An alignment, stored as the log2 of the (power-of-two) byte count.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]
pub struct Align {
    // log2 of the alignment in bytes; private so values stay validated
    // through `from_bits`/`from_bytes`.
    pow2: u8,
}
1024
1025impl fmt::Debug for Align {
1027 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1028 f.write_fmt(format_args!("Align({0} bytes)", self.bytes()))write!(f, "Align({} bytes)", self.bytes())
1029 }
1030}
1031
/// Reasons a byte count can fail to convert into an [`Align`].
#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

// `Debug` intentionally mirrors `Display` so error output is uniform.
impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "{align} is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "{align} is too large"),
        }
    }
}
1052
1053impl Align {
1054 pub const ONE: Align = Align { pow2: 0 };
1055 pub const EIGHT: Align = Align { pow2: 3 };
1056 pub const MAX: Align = Align { pow2: 29 };
1058
1059 #[inline]
1061 pub fn max_for_target(tdl: &TargetDataLayout) -> Align {
1062 let pointer_bits = tdl.pointer_size().bits();
1063 if let Ok(pointer_bits) = u8::try_from(pointer_bits)
1064 && pointer_bits <= Align::MAX.pow2
1065 {
1066 Align { pow2: pointer_bits - 1 }
1067 } else {
1068 Align::MAX
1069 }
1070 }
1071
1072 #[inline]
1073 pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
1074 Align::from_bytes(Size::from_bits(bits).bytes())
1075 }
1076
1077 #[inline]
1078 pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
1079 if align == 0 {
1081 return Ok(Align::ONE);
1082 }
1083
1084 #[cold]
1085 const fn not_power_of_2(align: u64) -> AlignFromBytesError {
1086 AlignFromBytesError::NotPowerOfTwo(align)
1087 }
1088
1089 #[cold]
1090 const fn too_large(align: u64) -> AlignFromBytesError {
1091 AlignFromBytesError::TooLarge(align)
1092 }
1093
1094 let tz = align.trailing_zeros();
1095 if align != (1 << tz) {
1096 return Err(not_power_of_2(align));
1097 }
1098
1099 let pow2 = tz as u8;
1100 if pow2 > Self::MAX.pow2 {
1101 return Err(too_large(align));
1102 }
1103
1104 Ok(Align { pow2 })
1105 }
1106
1107 #[inline]
1108 pub const fn bytes(self) -> u64 {
1109 1 << self.pow2
1110 }
1111
1112 #[inline]
1113 pub fn bytes_usize(self) -> usize {
1114 self.bytes().try_into().unwrap()
1115 }
1116
1117 #[inline]
1118 pub const fn bits(self) -> u64 {
1119 self.bytes() * 8
1120 }
1121
1122 #[inline]
1123 pub fn bits_usize(self) -> usize {
1124 self.bits().try_into().unwrap()
1125 }
1126
1127 #[inline]
1132 pub fn max_aligned_factor(size: Size) -> Align {
1133 Align { pow2: size.bytes().trailing_zeros() as u8 }
1134 }
1135
1136 #[inline]
1138 pub fn restrict_for_offset(self, size: Size) -> Align {
1139 self.min(Align::max_aligned_factor(size))
1140 }
1141}
1142
1143#[derive(#[automatically_derived]
impl ::core::marker::Copy for AbiAlign { }Copy, #[automatically_derived]
impl ::core::clone::Clone for AbiAlign {
#[inline]
fn clone(&self) -> AbiAlign {
let _: ::core::clone::AssertParamIsClone<Align>;
*self
}
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for AbiAlign {
#[inline]
fn eq(&self, other: &AbiAlign) -> bool { self.abi == other.abi }
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for AbiAlign {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Align>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for AbiAlign {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.abi, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for AbiAlign {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field1_finish(f, "AbiAlign",
"abi", &&self.abi)
}
}Debug)]
1153#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for AbiAlign {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
AbiAlign { abi: ref __binding_0 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1154pub struct AbiAlign {
1155 pub abi: Align,
1156}
1157
1158impl AbiAlign {
1159 #[inline]
1160 pub fn new(align: Align) -> AbiAlign {
1161 AbiAlign { abi: align }
1162 }
1163
1164 #[inline]
1165 pub fn min(self, other: AbiAlign) -> AbiAlign {
1166 AbiAlign { abi: self.abi.min(other.abi) }
1167 }
1168
1169 #[inline]
1170 pub fn max(self, other: AbiAlign) -> AbiAlign {
1171 AbiAlign { abi: self.abi.max(other.abi) }
1172 }
1173}
1174
1175impl Deref for AbiAlign {
1176 type Target = Align;
1177
1178 fn deref(&self) -> &Self::Target {
1179 &self.abi
1180 }
1181}
1182
/// Integer sizes supported by the layout machinery.
///
/// Used both for scalar layout and for enum discriminants; ordering follows
/// size (`I8 < I16 < … < I128`), which `fit_signed`/`fit_unsigned` rely on.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}
1193
1194impl Integer {
1195 pub fn int_ty_str(self) -> &'static str {
1196 use Integer::*;
1197 match self {
1198 I8 => "i8",
1199 I16 => "i16",
1200 I32 => "i32",
1201 I64 => "i64",
1202 I128 => "i128",
1203 }
1204 }
1205
1206 pub fn uint_ty_str(self) -> &'static str {
1207 use Integer::*;
1208 match self {
1209 I8 => "u8",
1210 I16 => "u16",
1211 I32 => "u32",
1212 I64 => "u64",
1213 I128 => "u128",
1214 }
1215 }
1216
1217 #[inline]
1218 pub fn size(self) -> Size {
1219 use Integer::*;
1220 match self {
1221 I8 => Size::from_bytes(1),
1222 I16 => Size::from_bytes(2),
1223 I32 => Size::from_bytes(4),
1224 I64 => Size::from_bytes(8),
1225 I128 => Size::from_bytes(16),
1226 }
1227 }
1228
1229 pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
1231 let dl = cx.data_layout();
1232
1233 match ity {
1234 IntegerType::Pointer(_) => dl.ptr_sized_integer(),
1235 IntegerType::Fixed(x, _) => x,
1236 }
1237 }
1238
1239 pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1240 use Integer::*;
1241 let dl = cx.data_layout();
1242
1243 AbiAlign::new(match self {
1244 I8 => dl.i8_align,
1245 I16 => dl.i16_align,
1246 I32 => dl.i32_align,
1247 I64 => dl.i64_align,
1248 I128 => dl.i128_align,
1249 })
1250 }
1251
1252 #[inline]
1254 pub fn signed_max(self) -> i128 {
1255 use Integer::*;
1256 match self {
1257 I8 => i8::MAX as i128,
1258 I16 => i16::MAX as i128,
1259 I32 => i32::MAX as i128,
1260 I64 => i64::MAX as i128,
1261 I128 => i128::MAX,
1262 }
1263 }
1264
1265 #[inline]
1267 pub fn signed_min(self) -> i128 {
1268 use Integer::*;
1269 match self {
1270 I8 => i8::MIN as i128,
1271 I16 => i16::MIN as i128,
1272 I32 => i32::MIN as i128,
1273 I64 => i64::MIN as i128,
1274 I128 => i128::MIN,
1275 }
1276 }
1277
1278 #[inline]
1280 pub fn fit_signed(x: i128) -> Integer {
1281 use Integer::*;
1282 match x {
1283 -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
1284 -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
1285 -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
1286 -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
1287 _ => I128,
1288 }
1289 }
1290
1291 #[inline]
1293 pub fn fit_unsigned(x: u128) -> Integer {
1294 use Integer::*;
1295 match x {
1296 0..=0x0000_0000_0000_00ff => I8,
1297 0..=0x0000_0000_0000_ffff => I16,
1298 0..=0x0000_0000_ffff_ffff => I32,
1299 0..=0xffff_ffff_ffff_ffff => I64,
1300 _ => I128,
1301 }
1302 }
1303
1304 pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
1306 use Integer::*;
1307 let dl = cx.data_layout();
1308
1309 [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
1310 wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
1311 })
1312 }
1313
1314 pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
1316 use Integer::*;
1317 let dl = cx.data_layout();
1318
1319 for candidate in [I64, I32, I16] {
1321 if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
1322 return candidate;
1323 }
1324 }
1325 I8
1326 }
1327
1328 #[inline]
1331 pub fn from_size(size: Size) -> Result<Self, String> {
1332 match size.bits() {
1333 8 => Ok(Integer::I8),
1334 16 => Ok(Integer::I16),
1335 32 => Ok(Integer::I32),
1336 64 => Ok(Integer::I64),
1337 128 => Ok(Integer::I128),
1338 _ => Err(::alloc::__export::must_use({
::alloc::fmt::format(format_args!("rust does not support integers with {0} bits",
size.bits()))
})format!("rust does not support integers with {} bits", size.bits())),
1339 }
1340 }
1341}
1342
/// Floating-point sizes supported by the layout machinery.
///
/// Ordering follows size (`F16 < F32 < F64 < F128`).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(StableHash))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}
1352
1353impl Float {
1354 pub fn size(self) -> Size {
1355 use Float::*;
1356
1357 match self {
1358 F16 => Size::from_bits(16),
1359 F32 => Size::from_bits(32),
1360 F64 => Size::from_bits(64),
1361 F128 => Size::from_bits(128),
1362 }
1363 }
1364
1365 pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1366 use Float::*;
1367 let dl = cx.data_layout();
1368
1369 AbiAlign::new(match self {
1370 F16 => dl.f16_align,
1371 F32 => dl.f32_align,
1372 F64 => dl.f64_align,
1373 F128 => dl.f128_align,
1374 })
1375 }
1376}
1377
1378#[derive(#[automatically_derived]
impl ::core::marker::Copy for Primitive { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Primitive {
#[inline]
fn clone(&self) -> Primitive {
let _: ::core::clone::AssertParamIsClone<Integer>;
let _: ::core::clone::AssertParamIsClone<bool>;
let _: ::core::clone::AssertParamIsClone<Float>;
let _: ::core::clone::AssertParamIsClone<AddressSpace>;
*self
}
}Clone, #[automatically_derived]
impl ::core::cmp::PartialEq for Primitive {
#[inline]
fn eq(&self, other: &Primitive) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(Primitive::Int(__self_0, __self_1),
Primitive::Int(__arg1_0, __arg1_1)) =>
__self_1 == __arg1_1 && __self_0 == __arg1_0,
(Primitive::Float(__self_0), Primitive::Float(__arg1_0)) =>
__self_0 == __arg1_0,
(Primitive::Pointer(__self_0), Primitive::Pointer(__arg1_0))
=> __self_0 == __arg1_0,
_ => unsafe { ::core::intrinsics::unreachable() }
}
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Primitive {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Integer>;
let _: ::core::cmp::AssertParamIsEq<bool>;
let _: ::core::cmp::AssertParamIsEq<Float>;
let _: ::core::cmp::AssertParamIsEq<AddressSpace>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Primitive {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
Primitive::Int(__self_0, __self_1) => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
Primitive::Float(__self_0) =>
::core::hash::Hash::hash(__self_0, state),
Primitive::Pointer(__self_0) =>
::core::hash::Hash::hash(__self_0, state),
}
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Primitive {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
Primitive::Int(__self_0, __self_1) =>
::core::fmt::Formatter::debug_tuple_field2_finish(f, "Int",
__self_0, &__self_1),
Primitive::Float(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Float",
&__self_0),
Primitive::Pointer(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"Pointer", &__self_0),
}
}
}Debug)]
1380#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for Primitive
{
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
Primitive::Int(ref __binding_0, ref __binding_1) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
Primitive::Float(ref __binding_0) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
Primitive::Pointer(ref __binding_0) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1381pub enum Primitive {
1382 Int(Integer, bool),
1390 Float(Float),
1391 Pointer(AddressSpace),
1392}
1393
1394impl Primitive {
1395 pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
1396 use Primitive::*;
1397 let dl = cx.data_layout();
1398
1399 match self {
1400 Int(i, _) => i.size(),
1401 Float(f) => f.size(),
1402 Pointer(a) => dl.pointer_size_in(a),
1403 }
1404 }
1405
1406 pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
1407 use Primitive::*;
1408 let dl = cx.data_layout();
1409
1410 match self {
1411 Int(i, _) => i.align(dl),
1412 Float(f) => f.align(dl),
1413 Pointer(a) => dl.pointer_align_in(a),
1414 }
1415 }
1416}
1417
/// An inclusive range of valid values that may wrap around.
///
/// If `start <= end` this is the ordinary range `start..=end`; if
/// `start > end` it wraps, meaning the valid values are `..=end` and
/// `start..` (see `contains` in the impl below).
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(StableHash))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}
1433
1434impl WrappingRange {
1435 pub fn full(size: Size) -> Self {
1436 Self { start: 0, end: size.unsigned_int_max() }
1437 }
1438
1439 #[inline(always)]
1441 pub fn contains(&self, v: u128) -> bool {
1442 if self.start <= self.end {
1443 self.start <= v && v <= self.end
1444 } else {
1445 self.start <= v || v <= self.end
1446 }
1447 }
1448
1449 #[inline(always)]
1452 pub fn contains_range(&self, other: Self, size: Size) -> bool {
1453 if self.is_full_for(size) {
1454 true
1455 } else {
1456 let trunc = |x| size.truncate(x);
1457
1458 let delta = self.start;
1459 let max = trunc(self.end.wrapping_sub(delta));
1460
1461 let other_start = trunc(other.start.wrapping_sub(delta));
1462 let other_end = trunc(other.end.wrapping_sub(delta));
1463
1464 (other_start <= other_end) && (other_end <= max)
1468 }
1469 }
1470
1471 #[inline(always)]
1473 fn with_start(mut self, start: u128) -> Self {
1474 self.start = start;
1475 self
1476 }
1477
1478 #[inline(always)]
1480 fn with_end(mut self, end: u128) -> Self {
1481 self.end = end;
1482 self
1483 }
1484
1485 #[inline]
1491 fn is_full_for(&self, size: Size) -> bool {
1492 let max_value = size.unsigned_int_max();
1493 if true {
if !(self.start <= max_value && self.end <= max_value) {
::core::panicking::panic("assertion failed: self.start <= max_value && self.end <= max_value")
};
};debug_assert!(self.start <= max_value && self.end <= max_value);
1494 self.start == (self.end.wrapping_add(1) & max_value)
1495 }
1496
1497 #[inline]
1503 pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1504 if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
1505 }
1506
1507 #[inline]
1516 pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
1517 if self.is_full_for(size) {
1518 Err(..)
1519 } else {
1520 let start: i128 = size.sign_extend(self.start);
1521 let end: i128 = size.sign_extend(self.end);
1522 Ok(start <= end)
1523 }
1524 }
1525}
1526
1527impl fmt::Debug for WrappingRange {
1528 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1529 if self.start > self.end {
1530 fmt.write_fmt(format_args!("(..={0}) | ({1}..)", self.end, self.start))write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1531 } else {
1532 fmt.write_fmt(format_args!("{0}..={1}", self.start, self.end))write!(fmt, "{}..={}", self.start, self.end)?;
1533 }
1534 Ok(())
1535 }
1536}
1537
1538#[derive(#[automatically_derived]
impl ::core::clone::Clone for Scalar {
#[inline]
fn clone(&self) -> Scalar {
let _: ::core::clone::AssertParamIsClone<Primitive>;
let _: ::core::clone::AssertParamIsClone<WrappingRange>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Scalar { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Scalar {
#[inline]
fn eq(&self, other: &Scalar) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(Scalar::Initialized { value: __self_0, valid_range: __self_1
}, Scalar::Initialized {
value: __arg1_0, valid_range: __arg1_1 }) =>
__self_0 == __arg1_0 && __self_1 == __arg1_1,
(Scalar::Union { value: __self_0 }, Scalar::Union {
value: __arg1_0 }) => __self_0 == __arg1_0,
_ => unsafe { ::core::intrinsics::unreachable() }
}
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Scalar {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Primitive>;
let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Scalar {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
{
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
Scalar::Union { value: __self_0 } =>
::core::hash::Hash::hash(__self_0, state),
}
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Scalar {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
Scalar::Initialized { value: __self_0, valid_range: __self_1 } =>
::core::fmt::Formatter::debug_struct_field2_finish(f,
"Initialized", "value", __self_0, "valid_range", &__self_1),
Scalar::Union { value: __self_0 } =>
::core::fmt::Formatter::debug_struct_field1_finish(f, "Union",
"value", &__self_0),
}
}
}Debug)]
1540#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for Scalar {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
Scalar::Initialized {
value: ref __binding_0, valid_range: ref __binding_1 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
Scalar::Union { value: ref __binding_0 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1541pub enum Scalar {
1542 Initialized {
1543 value: Primitive,
1544
1545 valid_range: WrappingRange,
1549 },
1550 Union {
1551 value: Primitive,
1557 },
1558}
1559
1560impl Scalar {
1561 #[inline]
1562 pub fn is_bool(&self) -> bool {
1563 use Integer::*;
1564 #[allow(non_exhaustive_omitted_patterns)] match self {
Scalar::Initialized {
value: Primitive::Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 } } => true,
_ => false,
}matches!(
1565 self,
1566 Scalar::Initialized {
1567 value: Primitive::Int(I8, false),
1568 valid_range: WrappingRange { start: 0, end: 1 }
1569 }
1570 )
1571 }
1572
1573 pub fn primitive(&self) -> Primitive {
1576 match *self {
1577 Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1578 }
1579 }
1580
1581 pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
1582 self.primitive().align(cx)
1583 }
1584
1585 pub fn size(self, cx: &impl HasDataLayout) -> Size {
1586 self.primitive().size(cx)
1587 }
1588
1589 #[inline]
1590 pub fn to_union(&self) -> Self {
1591 Self::Union { value: self.primitive() }
1592 }
1593
1594 #[inline]
1595 pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1596 match *self {
1597 Scalar::Initialized { valid_range, .. } => valid_range,
1598 Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1599 }
1600 }
1601
1602 #[inline]
1603 pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1606 match self {
1607 Scalar::Initialized { valid_range, .. } => valid_range,
1608 Scalar::Union { .. } => {
::core::panicking::panic_fmt(format_args!("cannot change the valid range of a union"));
}panic!("cannot change the valid range of a union"),
1609 }
1610 }
1611
1612 #[inline]
1615 pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1616 match *self {
1617 Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1618 Scalar::Union { .. } => true,
1619 }
1620 }
1621
1622 #[inline]
1624 pub fn is_uninit_valid(&self) -> bool {
1625 match *self {
1626 Scalar::Initialized { .. } => false,
1627 Scalar::Union { .. } => true,
1628 }
1629 }
1630
1631 #[inline]
1633 pub fn is_signed(&self) -> bool {
1634 match self.primitive() {
1635 Primitive::Int(_, signed) => signed,
1636 _ => false,
1637 }
1638 }
1639}
1640
1641#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
FieldsShape<FieldIdx> {
#[inline]
fn eq(&self, other: &FieldsShape<FieldIdx>) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(FieldsShape::Union(__self_0), FieldsShape::Union(__arg1_0))
=> __self_0 == __arg1_0,
(FieldsShape::Array { stride: __self_0, count: __self_1 },
FieldsShape::Array { stride: __arg1_0, count: __arg1_1 }) =>
__self_1 == __arg1_1 && __self_0 == __arg1_0,
(FieldsShape::Arbitrary {
offsets: __self_0, in_memory_order: __self_1 },
FieldsShape::Arbitrary {
offsets: __arg1_0, in_memory_order: __arg1_1 }) =>
__self_0 == __arg1_0 && __self_1 == __arg1_1,
_ => true,
}
}
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
FieldsShape<FieldIdx> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<NonZeroUsize>;
let _: ::core::cmp::AssertParamIsEq<Size>;
let _: ::core::cmp::AssertParamIsEq<u64>;
let _: ::core::cmp::AssertParamIsEq<IndexVec<FieldIdx, Size>>;
let _: ::core::cmp::AssertParamIsEq<IndexVec<u32, FieldIdx>>;
}
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
FieldsShape<FieldIdx> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
FieldsShape::Union(__self_0) =>
::core::hash::Hash::hash(__self_0, state),
FieldsShape::Array { stride: __self_0, count: __self_1 } => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
FieldsShape::Arbitrary {
offsets: __self_0, in_memory_order: __self_1 } => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
_ => {}
}
}
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
FieldsShape<FieldIdx> {
#[inline]
fn clone(&self) -> FieldsShape<FieldIdx> {
match self {
FieldsShape::Primitive => FieldsShape::Primitive,
FieldsShape::Union(__self_0) =>
FieldsShape::Union(::core::clone::Clone::clone(__self_0)),
FieldsShape::Array { stride: __self_0, count: __self_1 } =>
FieldsShape::Array {
stride: ::core::clone::Clone::clone(__self_0),
count: ::core::clone::Clone::clone(__self_1),
},
FieldsShape::Arbitrary {
offsets: __self_0, in_memory_order: __self_1 } =>
FieldsShape::Arbitrary {
offsets: ::core::clone::Clone::clone(__self_0),
in_memory_order: ::core::clone::Clone::clone(__self_1),
},
}
}
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
FieldsShape<FieldIdx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
FieldsShape::Primitive =>
::core::fmt::Formatter::write_str(f, "Primitive"),
FieldsShape::Union(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Union",
&__self_0),
FieldsShape::Array { stride: __self_0, count: __self_1 } =>
::core::fmt::Formatter::debug_struct_field2_finish(f, "Array",
"stride", __self_0, "count", &__self_1),
FieldsShape::Arbitrary {
offsets: __self_0, in_memory_order: __self_1 } =>
::core::fmt::Formatter::debug_struct_field2_finish(f,
"Arbitrary", "offsets", __self_0, "in_memory_order",
&__self_1),
}
}
}Debug)]
1644#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl<FieldIdx: Idx> ::rustc_data_structures::stable_hasher::StableHash
for FieldsShape<FieldIdx> where
FieldIdx: ::rustc_data_structures::stable_hasher::StableHash {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
FieldsShape::Primitive => {}
FieldsShape::Union(ref __binding_0) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
FieldsShape::Array {
stride: ref __binding_0, count: ref __binding_1 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
FieldsShape::Arbitrary {
offsets: ref __binding_0, in_memory_order: ref __binding_1 }
=> {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1645pub enum FieldsShape<FieldIdx: Idx> {
1646 Primitive,
1648
1649 Union(NonZeroUsize),
1651
1652 Array { stride: Size, count: u64 },
1654
1655 Arbitrary {
1663 offsets: IndexVec<FieldIdx, Size>,
1668
1669 in_memory_order: IndexVec<u32, FieldIdx>,
1677 },
1678}
1679
1680impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1681 #[inline]
1682 pub fn count(&self) -> usize {
1683 match *self {
1684 FieldsShape::Primitive => 0,
1685 FieldsShape::Union(count) => count.get(),
1686 FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1687 FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1688 }
1689 }
1690
1691 #[inline]
1692 pub fn offset(&self, i: usize) -> Size {
1693 match *self {
1694 FieldsShape::Primitive => {
1695 {
::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
format_args!("FieldsShape::offset: `Primitive`s have no fields")));
}unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1696 }
1697 FieldsShape::Union(count) => {
1698 if !(i < count.get()) {
{
::core::panicking::panic_fmt(format_args!("tried to access field {0} of union with {1} fields",
i, count));
}
};assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1699 Size::ZERO
1700 }
1701 FieldsShape::Array { stride, count } => {
1702 let i = u64::try_from(i).unwrap();
1703 if !(i < count) {
{
::core::panicking::panic_fmt(format_args!("tried to access field {0} of array with {1} fields",
i, count));
}
};assert!(i < count, "tried to access field {i} of array with {count} fields");
1704 stride * i
1705 }
1706 FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1707 }
1708 }
1709
1710 #[inline]
1712 pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1713 let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1717
1718 (0..pseudofield_count).map(move |i| match self {
1719 FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1720 FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
1721 })
1722 }
1723}
1724
/// An identifier for an address space, for targets where pointers in different
/// address spaces may differ in size or meaning.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(StableHash))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space.
    pub const ZERO: Self = AddressSpace(0);
    /// Workgroup-shared memory on GPU targets.
    /// NOTE(review): `3` matches the usual AMDGPU/NVPTX shared-memory address
    /// space convention — confirm against the backends that consume this.
    pub const GPU_WORKGROUP: Self = AddressSpace(3);
}
1739
/// How many scalable vectors a by-value aggregate contains: 1 for a lone
/// scalable vector, 2+ for a tuple of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(StableHash))]
pub struct NumScalableVectors(pub u8);

impl NumScalableVectors {
    /// A non-tuple scalable vector counts as exactly one vector.
    pub fn for_non_tuple() -> Self {
        NumScalableVectors(1)
    }

    /// The vector count for a tuple with `count` fields, or `None` when the
    /// count is outside the supported range.
    /// NOTE(review): `2..8` excludes 8-field tuples, yet the `IntoDiagArg`
    /// impl spells out "eight" — confirm whether the bound should be `2..=8`.
    pub fn from_field_count(count: usize) -> Option<Self> {
        match count {
            2..8 => Some(NumScalableVectors(count as u8)),
            _ => None,
        }
    }
}
1761
#[cfg(feature = "nightly")]
impl IntoDiagArg for NumScalableVectors {
    /// Renders the vector count as an English word for use in diagnostics.
    ///
    /// # Panics
    /// Panics on counts of 0 or greater than 8, which are ill-formed values
    /// for this type.
    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
        DiagArgValue::Str(std::borrow::Cow::Borrowed(match self.0 {
            0 => panic!("`NumScalableVectors(0)` is illformed"),
            1 => "one",
            2 => "two",
            3 => "three",
            4 => "four",
            5 => "five",
            6 => "six",
            7 => "seven",
            8 => "eight",
            _ => panic!("`NumScalableVectors(N)` for N>8 is illformed"),
        }))
    }
}

1780#[derive(#[automatically_derived]
impl ::core::clone::Clone for BackendRepr {
#[inline]
fn clone(&self) -> BackendRepr {
let _: ::core::clone::AssertParamIsClone<Scalar>;
let _: ::core::clone::AssertParamIsClone<u64>;
let _: ::core::clone::AssertParamIsClone<NumScalableVectors>;
let _: ::core::clone::AssertParamIsClone<bool>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for BackendRepr { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for BackendRepr {
#[inline]
fn eq(&self, other: &BackendRepr) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(BackendRepr::Scalar(__self_0), BackendRepr::Scalar(__arg1_0))
=> __self_0 == __arg1_0,
(BackendRepr::ScalarPair(__self_0, __self_1),
BackendRepr::ScalarPair(__arg1_0, __arg1_1)) =>
__self_0 == __arg1_0 && __self_1 == __arg1_1,
(BackendRepr::SimdScalableVector {
element: __self_0,
count: __self_1,
number_of_vectors: __self_2 },
BackendRepr::SimdScalableVector {
element: __arg1_0,
count: __arg1_1,
number_of_vectors: __arg1_2 }) =>
__self_1 == __arg1_1 && __self_0 == __arg1_0 &&
__self_2 == __arg1_2,
(BackendRepr::SimdVector { element: __self_0, count: __self_1
}, BackendRepr::SimdVector {
element: __arg1_0, count: __arg1_1 }) =>
__self_1 == __arg1_1 && __self_0 == __arg1_0,
(BackendRepr::Memory { sized: __self_0 },
BackendRepr::Memory { sized: __arg1_0 }) =>
__self_0 == __arg1_0,
_ => unsafe { ::core::intrinsics::unreachable() }
}
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for BackendRepr {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Scalar>;
let _: ::core::cmp::AssertParamIsEq<u64>;
let _: ::core::cmp::AssertParamIsEq<NumScalableVectors>;
let _: ::core::cmp::AssertParamIsEq<bool>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for BackendRepr {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
BackendRepr::Scalar(__self_0) =>
::core::hash::Hash::hash(__self_0, state),
BackendRepr::ScalarPair(__self_0, __self_1) => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
BackendRepr::SimdScalableVector {
element: __self_0,
count: __self_1,
number_of_vectors: __self_2 } => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state);
::core::hash::Hash::hash(__self_2, state)
}
BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
{
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state)
}
BackendRepr::Memory { sized: __self_0 } =>
::core::hash::Hash::hash(__self_0, state),
}
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for BackendRepr {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
BackendRepr::Scalar(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Scalar",
&__self_0),
BackendRepr::ScalarPair(__self_0, __self_1) =>
::core::fmt::Formatter::debug_tuple_field2_finish(f,
"ScalarPair", __self_0, &__self_1),
BackendRepr::SimdScalableVector {
element: __self_0,
count: __self_1,
number_of_vectors: __self_2 } =>
::core::fmt::Formatter::debug_struct_field3_finish(f,
"SimdScalableVector", "element", __self_0, "count",
__self_1, "number_of_vectors", &__self_2),
BackendRepr::SimdVector { element: __self_0, count: __self_1 } =>
::core::fmt::Formatter::debug_struct_field2_finish(f,
"SimdVector", "element", __self_0, "count", &__self_1),
BackendRepr::Memory { sized: __self_0 } =>
::core::fmt::Formatter::debug_struct_field1_finish(f,
"Memory", "sized", &__self_0),
}
}
}Debug)]
1791#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for
BackendRepr {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
BackendRepr::Scalar(ref __binding_0) => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
BackendRepr::ScalarPair(ref __binding_0, ref __binding_1) =>
{
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
BackendRepr::SimdScalableVector {
element: ref __binding_0,
count: ref __binding_1,
number_of_vectors: ref __binding_2 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
{ __binding_2.stable_hash(__hcx, __hasher); }
}
BackendRepr::SimdVector {
element: ref __binding_0, count: ref __binding_1 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
}
BackendRepr::Memory { sized: ref __binding_0 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1792pub enum BackendRepr {
1793 Scalar(Scalar),
1794 ScalarPair(Scalar, Scalar),
1795 SimdScalableVector {
1796 element: Scalar,
1797 count: u64,
1798 number_of_vectors: NumScalableVectors,
1799 },
1800 SimdVector {
1801 element: Scalar,
1802 count: u64,
1803 },
1804 Memory {
1806 sized: bool,
1808 },
1809}
1810
1811impl BackendRepr {
1812 #[inline]
1814 pub fn is_unsized(&self) -> bool {
1815 match *self {
1816 BackendRepr::Scalar(_)
1817 | BackendRepr::ScalarPair(..)
1818 | BackendRepr::SimdScalableVector { .. }
1824 | BackendRepr::SimdVector { .. } => false,
1825 BackendRepr::Memory { sized } => !sized,
1826 }
1827 }
1828
1829 #[inline]
1830 pub fn is_sized(&self) -> bool {
1831 !self.is_unsized()
1832 }
1833
1834 #[inline]
1837 pub fn is_signed(&self) -> bool {
1838 match self {
1839 BackendRepr::Scalar(scal) => scal.is_signed(),
1840 _ => {
::core::panicking::panic_fmt(format_args!("`is_signed` on non-scalar ABI {0:?}",
self));
}panic!("`is_signed` on non-scalar ABI {self:?}"),
1841 }
1842 }
1843
1844 #[inline]
1846 pub fn is_scalar(&self) -> bool {
1847 #[allow(non_exhaustive_omitted_patterns)] match *self {
BackendRepr::Scalar(_) => true,
_ => false,
}matches!(*self, BackendRepr::Scalar(_))
1848 }
1849
1850 #[inline]
1852 pub fn is_bool(&self) -> bool {
1853 #[allow(non_exhaustive_omitted_patterns)] match *self {
BackendRepr::Scalar(s) if s.is_bool() => true,
_ => false,
}matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1854 }
1855
1856 pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1860 match *self {
1861 BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1862 BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1863 BackendRepr::SimdVector { .. }
1865 | BackendRepr::Memory { .. }
1866 | BackendRepr::SimdScalableVector { .. } => None,
1867 }
1868 }
1869
1870 pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1874 match *self {
1875 BackendRepr::Scalar(s) => Some(s.size(cx)),
1877 BackendRepr::ScalarPair(s1, s2) => {
1879 let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1880 let size = (field2_offset + s2.size(cx)).align_to(
1881 self.scalar_align(cx)
1882 .unwrap(),
1884 );
1885 Some(size)
1886 }
1887 BackendRepr::SimdVector { .. }
1889 | BackendRepr::Memory { .. }
1890 | BackendRepr::SimdScalableVector { .. } => None,
1891 }
1892 }
1893
1894 pub fn to_union(&self) -> Self {
1896 match *self {
1897 BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
1898 BackendRepr::ScalarPair(s1, s2) => {
1899 BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
1900 }
1901 BackendRepr::SimdVector { element, count } => {
1902 BackendRepr::SimdVector { element: element.to_union(), count }
1903 }
1904 BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
1905 BackendRepr::SimdScalableVector { element, count, number_of_vectors } => {
1906 BackendRepr::SimdScalableVector {
1907 element: element.to_union(),
1908 count,
1909 number_of_vectors,
1910 }
1911 }
1912 }
1913 }
1914
1915 pub fn eq_up_to_validity(&self, other: &Self) -> bool {
1916 match (self, other) {
1917 (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
1920 (
1921 BackendRepr::SimdVector { element: element_l, count: count_l },
1922 BackendRepr::SimdVector { element: element_r, count: count_r },
1923 ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
1924 (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
1925 l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
1926 }
1927 _ => self == other,
1929 }
1930 }
1931}
1932
1933#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
Variants<FieldIdx, VariantIdx> {
#[inline]
fn eq(&self, other: &Variants<FieldIdx, VariantIdx>) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(Variants::Single { index: __self_0 }, Variants::Single {
index: __arg1_0 }) => __self_0 == __arg1_0,
(Variants::Multiple {
tag: __self_0,
tag_encoding: __self_1,
tag_field: __self_2,
variants: __self_3 }, Variants::Multiple {
tag: __arg1_0,
tag_encoding: __arg1_1,
tag_field: __arg1_2,
variants: __arg1_3 }) =>
__self_0 == __arg1_0 && __self_1 == __arg1_1 &&
__self_2 == __arg1_2 && __self_3 == __arg1_3,
_ => true,
}
}
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
::core::cmp::Eq for Variants<FieldIdx, VariantIdx> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
let _: ::core::cmp::AssertParamIsEq<Scalar>;
let _: ::core::cmp::AssertParamIsEq<TagEncoding<VariantIdx>>;
let _: ::core::cmp::AssertParamIsEq<FieldIdx>;
let _:
::core::cmp::AssertParamIsEq<IndexVec<VariantIdx,
LayoutData<FieldIdx, VariantIdx>>>;
}
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
::core::hash::Hash for Variants<FieldIdx, VariantIdx> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
Variants::Single { index: __self_0 } =>
::core::hash::Hash::hash(__self_0, state),
Variants::Multiple {
tag: __self_0,
tag_encoding: __self_1,
tag_field: __self_2,
variants: __self_3 } => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state);
::core::hash::Hash::hash(__self_2, state);
::core::hash::Hash::hash(__self_3, state)
}
_ => {}
}
}
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
Idx> ::core::clone::Clone for Variants<FieldIdx, VariantIdx> {
#[inline]
fn clone(&self) -> Variants<FieldIdx, VariantIdx> {
match self {
Variants::Empty => Variants::Empty,
Variants::Single { index: __self_0 } =>
Variants::Single {
index: ::core::clone::Clone::clone(__self_0),
},
Variants::Multiple {
tag: __self_0,
tag_encoding: __self_1,
tag_field: __self_2,
variants: __self_3 } =>
Variants::Multiple {
tag: ::core::clone::Clone::clone(__self_0),
tag_encoding: ::core::clone::Clone::clone(__self_1),
tag_field: ::core::clone::Clone::clone(__self_2),
variants: ::core::clone::Clone::clone(__self_3),
},
}
}
}Clone, #[automatically_derived]
impl<FieldIdx: ::core::fmt::Debug + Idx, VariantIdx: ::core::fmt::Debug + Idx>
::core::fmt::Debug for Variants<FieldIdx, VariantIdx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
Variants::Empty => ::core::fmt::Formatter::write_str(f, "Empty"),
Variants::Single { index: __self_0 } =>
::core::fmt::Formatter::debug_struct_field1_finish(f,
"Single", "index", &__self_0),
Variants::Multiple {
tag: __self_0,
tag_encoding: __self_1,
tag_field: __self_2,
variants: __self_3 } =>
::core::fmt::Formatter::debug_struct_field4_finish(f,
"Multiple", "tag", __self_0, "tag_encoding", __self_1,
"tag_field", __self_2, "variants", &__self_3),
}
}
}Debug)]
1935#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl<FieldIdx: Idx, VariantIdx: Idx>
::rustc_data_structures::stable_hasher::StableHash for
Variants<FieldIdx, VariantIdx> where
VariantIdx: ::rustc_data_structures::stable_hasher::StableHash,
FieldIdx: ::rustc_data_structures::stable_hasher::StableHash {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
Variants::Empty => {}
Variants::Single { index: ref __binding_0 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
}
Variants::Multiple {
tag: ref __binding_0,
tag_encoding: ref __binding_1,
tag_field: ref __binding_2,
variants: ref __binding_3 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
{ __binding_2.stable_hash(__hcx, __hasher); }
{ __binding_3.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1936pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
1937 Empty,
1939
1940 Single {
1942 index: VariantIdx,
1944 },
1945
1946 Multiple {
1953 tag: Scalar,
1954 tag_encoding: TagEncoding<VariantIdx>,
1955 tag_field: FieldIdx,
1956 variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
1957 },
1958}
1959
1960#[derive(#[automatically_derived]
impl<VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
TagEncoding<VariantIdx> {
#[inline]
fn eq(&self, other: &TagEncoding<VariantIdx>) -> bool {
let __self_discr = ::core::intrinsics::discriminant_value(self);
let __arg1_discr = ::core::intrinsics::discriminant_value(other);
__self_discr == __arg1_discr &&
match (self, other) {
(TagEncoding::Niche {
untagged_variant: __self_0,
niche_variants: __self_1,
niche_start: __self_2 }, TagEncoding::Niche {
untagged_variant: __arg1_0,
niche_variants: __arg1_1,
niche_start: __arg1_2 }) =>
__self_2 == __arg1_2 && __self_0 == __arg1_0 &&
__self_1 == __arg1_1,
_ => true,
}
}
}PartialEq, #[automatically_derived]
impl<VariantIdx: ::core::cmp::Eq + Idx> ::core::cmp::Eq for
TagEncoding<VariantIdx> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<VariantIdx>;
let _: ::core::cmp::AssertParamIsEq<RangeInclusive<VariantIdx>>;
let _: ::core::cmp::AssertParamIsEq<u128>;
}
}Eq, #[automatically_derived]
impl<VariantIdx: ::core::hash::Hash + Idx> ::core::hash::Hash for
TagEncoding<VariantIdx> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
let __self_discr = ::core::intrinsics::discriminant_value(self);
::core::hash::Hash::hash(&__self_discr, state);
match self {
TagEncoding::Niche {
untagged_variant: __self_0,
niche_variants: __self_1,
niche_start: __self_2 } => {
::core::hash::Hash::hash(__self_0, state);
::core::hash::Hash::hash(__self_1, state);
::core::hash::Hash::hash(__self_2, state)
}
_ => {}
}
}
}Hash, #[automatically_derived]
impl<VariantIdx: ::core::clone::Clone + Idx> ::core::clone::Clone for
TagEncoding<VariantIdx> {
#[inline]
fn clone(&self) -> TagEncoding<VariantIdx> {
match self {
TagEncoding::Direct => TagEncoding::Direct,
TagEncoding::Niche {
untagged_variant: __self_0,
niche_variants: __self_1,
niche_start: __self_2 } =>
TagEncoding::Niche {
untagged_variant: ::core::clone::Clone::clone(__self_0),
niche_variants: ::core::clone::Clone::clone(__self_1),
niche_start: ::core::clone::Clone::clone(__self_2),
},
}
}
}Clone, #[automatically_derived]
impl<VariantIdx: ::core::fmt::Debug + Idx> ::core::fmt::Debug for
TagEncoding<VariantIdx> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
TagEncoding::Direct =>
::core::fmt::Formatter::write_str(f, "Direct"),
TagEncoding::Niche {
untagged_variant: __self_0,
niche_variants: __self_1,
niche_start: __self_2 } =>
::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
"untagged_variant", __self_0, "niche_variants", __self_1,
"niche_start", &__self_2),
}
}
}Debug)]
1962#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl<VariantIdx: Idx>
::rustc_data_structures::stable_hasher::StableHash for
TagEncoding<VariantIdx> where
VariantIdx: ::rustc_data_structures::stable_hasher::StableHash {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
::std::mem::discriminant(self).stable_hash(__hcx, __hasher);
match *self {
TagEncoding::Direct => {}
TagEncoding::Niche {
untagged_variant: ref __binding_0,
niche_variants: ref __binding_1,
niche_start: ref __binding_2 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
{ __binding_2.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
1963pub enum TagEncoding<VariantIdx: Idx> {
1964 Direct,
1967
1968 Niche {
1992 untagged_variant: VariantIdx,
1993 niche_variants: RangeInclusive<VariantIdx>,
1996 niche_start: u128,
1999 },
2000}
2001
2002#[derive(#[automatically_derived]
impl ::core::clone::Clone for Niche {
#[inline]
fn clone(&self) -> Niche {
let _: ::core::clone::AssertParamIsClone<Size>;
let _: ::core::clone::AssertParamIsClone<Primitive>;
let _: ::core::clone::AssertParamIsClone<WrappingRange>;
*self
}
}Clone, #[automatically_derived]
impl ::core::marker::Copy for Niche { }Copy, #[automatically_derived]
impl ::core::cmp::PartialEq for Niche {
#[inline]
fn eq(&self, other: &Niche) -> bool {
self.offset == other.offset && self.value == other.value &&
self.valid_range == other.valid_range
}
}PartialEq, #[automatically_derived]
impl ::core::cmp::Eq for Niche {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Size>;
let _: ::core::cmp::AssertParamIsEq<Primitive>;
let _: ::core::cmp::AssertParamIsEq<WrappingRange>;
}
}Eq, #[automatically_derived]
impl ::core::hash::Hash for Niche {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.offset, state);
::core::hash::Hash::hash(&self.value, state);
::core::hash::Hash::hash(&self.valid_range, state)
}
}Hash, #[automatically_derived]
impl ::core::fmt::Debug for Niche {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field3_finish(f, "Niche",
"offset", &self.offset, "value", &self.value, "valid_range",
&&self.valid_range)
}
}Debug)]
2003#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl ::rustc_data_structures::stable_hasher::StableHash for Niche {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
Niche {
offset: ref __binding_0,
value: ref __binding_1,
valid_range: ref __binding_2 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
{ __binding_2.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
2004pub struct Niche {
2005 pub offset: Size,
2006 pub value: Primitive,
2007 pub valid_range: WrappingRange,
2008}
2009
2010impl Niche {
2011 pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
2012 let Scalar::Initialized { value, valid_range } = scalar else { return None };
2013 let niche = Niche { offset, value, valid_range };
2014 if niche.available(cx) > 0 { Some(niche) } else { None }
2015 }
2016
2017 pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
2018 let Self { value, valid_range: v, .. } = *self;
2019 let size = value.size(cx);
2020 if !(size.bits() <= 128) {
::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
2021 let max_value = size.unsigned_int_max();
2022
2023 let niche = v.end.wrapping_add(1)..v.start;
2025 niche.end.wrapping_sub(niche.start) & max_value
2026 }
2027
2028 pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
2029 if !(count > 0) { ::core::panicking::panic("assertion failed: count > 0") };assert!(count > 0);
2030
2031 let Self { value, valid_range: v, .. } = *self;
2032 let size = value.size(cx);
2033 if !(size.bits() <= 128) {
::core::panicking::panic("assertion failed: size.bits() <= 128")
};assert!(size.bits() <= 128);
2034 let max_value = size.unsigned_int_max();
2035
2036 let available = v.start.wrapping_sub(v.end).wrapping_sub(1) & max_value;
2037 if count > available {
2038 return None;
2039 }
2040
2041 let move_start = |v: WrappingRange| {
2055 let start = v.start.wrapping_sub(count) & max_value;
2056 Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
2057 };
2058 let move_end = |v: WrappingRange| {
2059 let start = v.end.wrapping_add(1) & max_value;
2060 let end = v.end.wrapping_add(count) & max_value;
2061 Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
2062 };
2063 let distance_end_zero = max_value - v.end;
2064 let is_bool = size.bytes() == 1 && v == WrappingRange { start: 0, end: 1 };
2067 if count == 1 && !is_bool {
2068 let next_up = size.sign_extend(v.end.wrapping_add(1)).unsigned_abs();
2073 let next_down = size.sign_extend(v.start.wrapping_sub(1)).unsigned_abs();
2074 if next_down <= next_up { move_start(v) } else { move_end(v) }
2075 } else if v.start > v.end {
2076 move_end(v)
2078 } else if v.start <= distance_end_zero {
2079 if count <= v.start {
2080 move_start(v)
2081 } else {
2082 move_end(v)
2084 }
2085 } else {
2086 let end = v.end.wrapping_add(count) & max_value;
2087 let overshot_zero = (1..=v.end).contains(&end);
2088 if overshot_zero {
2089 move_start(v)
2091 } else {
2092 move_end(v)
2093 }
2094 }
2095 }
2096}
2097
2098#[derive(#[automatically_derived]
impl<FieldIdx: ::core::cmp::PartialEq + Idx,
VariantIdx: ::core::cmp::PartialEq + Idx> ::core::cmp::PartialEq for
LayoutData<FieldIdx, VariantIdx> {
#[inline]
fn eq(&self, other: &LayoutData<FieldIdx, VariantIdx>) -> bool {
self.uninhabited == other.uninhabited && self.fields == other.fields
&& self.variants == other.variants &&
self.backend_repr == other.backend_repr &&
self.largest_niche == other.largest_niche &&
self.align == other.align && self.size == other.size &&
self.max_repr_align == other.max_repr_align &&
self.unadjusted_abi_align == other.unadjusted_abi_align &&
self.randomization_seed == other.randomization_seed
}
}PartialEq, #[automatically_derived]
impl<FieldIdx: ::core::cmp::Eq + Idx, VariantIdx: ::core::cmp::Eq + Idx>
::core::cmp::Eq for LayoutData<FieldIdx, VariantIdx> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<FieldsShape<FieldIdx>>;
let _: ::core::cmp::AssertParamIsEq<Variants<FieldIdx, VariantIdx>>;
let _: ::core::cmp::AssertParamIsEq<BackendRepr>;
let _: ::core::cmp::AssertParamIsEq<Option<Niche>>;
let _: ::core::cmp::AssertParamIsEq<bool>;
let _: ::core::cmp::AssertParamIsEq<AbiAlign>;
let _: ::core::cmp::AssertParamIsEq<Size>;
let _: ::core::cmp::AssertParamIsEq<Option<Align>>;
let _: ::core::cmp::AssertParamIsEq<Align>;
let _: ::core::cmp::AssertParamIsEq<Hash64>;
}
}Eq, #[automatically_derived]
impl<FieldIdx: ::core::hash::Hash + Idx, VariantIdx: ::core::hash::Hash + Idx>
::core::hash::Hash for LayoutData<FieldIdx, VariantIdx> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.fields, state);
::core::hash::Hash::hash(&self.variants, state);
::core::hash::Hash::hash(&self.backend_repr, state);
::core::hash::Hash::hash(&self.largest_niche, state);
::core::hash::Hash::hash(&self.uninhabited, state);
::core::hash::Hash::hash(&self.align, state);
::core::hash::Hash::hash(&self.size, state);
::core::hash::Hash::hash(&self.max_repr_align, state);
::core::hash::Hash::hash(&self.unadjusted_abi_align, state);
::core::hash::Hash::hash(&self.randomization_seed, state)
}
}Hash, #[automatically_derived]
impl<FieldIdx: ::core::clone::Clone + Idx, VariantIdx: ::core::clone::Clone +
Idx> ::core::clone::Clone for LayoutData<FieldIdx, VariantIdx> {
#[inline]
fn clone(&self) -> LayoutData<FieldIdx, VariantIdx> {
LayoutData {
fields: ::core::clone::Clone::clone(&self.fields),
variants: ::core::clone::Clone::clone(&self.variants),
backend_repr: ::core::clone::Clone::clone(&self.backend_repr),
largest_niche: ::core::clone::Clone::clone(&self.largest_niche),
uninhabited: ::core::clone::Clone::clone(&self.uninhabited),
align: ::core::clone::Clone::clone(&self.align),
size: ::core::clone::Clone::clone(&self.size),
max_repr_align: ::core::clone::Clone::clone(&self.max_repr_align),
unadjusted_abi_align: ::core::clone::Clone::clone(&self.unadjusted_abi_align),
randomization_seed: ::core::clone::Clone::clone(&self.randomization_seed),
}
}
}Clone)]
2100#[cfg_attr(feature = "nightly", derive(const _: () =
{
impl<FieldIdx: Idx, VariantIdx: Idx>
::rustc_data_structures::stable_hasher::StableHash for
LayoutData<FieldIdx, VariantIdx> where
FieldIdx: ::rustc_data_structures::stable_hasher::StableHash,
VariantIdx: ::rustc_data_structures::stable_hasher::StableHash {
#[inline]
fn stable_hash<__Hcx: ::rustc_data_structures::stable_hasher::StableHashCtxt>(&self,
__hcx: &mut __Hcx,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
LayoutData {
fields: ref __binding_0,
variants: ref __binding_1,
backend_repr: ref __binding_2,
largest_niche: ref __binding_3,
uninhabited: ref __binding_4,
align: ref __binding_5,
size: ref __binding_6,
max_repr_align: ref __binding_7,
unadjusted_abi_align: ref __binding_8,
randomization_seed: ref __binding_9 } => {
{ __binding_0.stable_hash(__hcx, __hasher); }
{ __binding_1.stable_hash(__hcx, __hasher); }
{ __binding_2.stable_hash(__hcx, __hasher); }
{ __binding_3.stable_hash(__hcx, __hasher); }
{ __binding_4.stable_hash(__hcx, __hasher); }
{ __binding_5.stable_hash(__hcx, __hasher); }
{ __binding_6.stable_hash(__hcx, __hasher); }
{ __binding_7.stable_hash(__hcx, __hasher); }
{ __binding_8.stable_hash(__hcx, __hasher); }
{ __binding_9.stable_hash(__hcx, __hasher); }
}
}
}
}
};StableHash))]
2101pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
2102 pub fields: FieldsShape<FieldIdx>,
2104
2105 pub variants: Variants<FieldIdx, VariantIdx>,
2113
2114 pub backend_repr: BackendRepr,
2122
2123 pub largest_niche: Option<Niche>,
2126 pub uninhabited: bool,
2131
2132 pub align: AbiAlign,
2133 pub size: Size,
2134
2135 pub max_repr_align: Option<Align>,
2139
2140 pub unadjusted_abi_align: Align,
2144
2145 pub randomization_seed: Hash64,
2156}
2157
2158impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2159 pub fn is_aggregate(&self) -> bool {
2161 match self.backend_repr {
2162 BackendRepr::Scalar(_)
2163 | BackendRepr::SimdVector { .. }
2164 | BackendRepr::SimdScalableVector { .. } => false,
2165 BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
2166 }
2167 }
2168
2169 pub fn is_uninhabited(&self) -> bool {
2171 self.uninhabited
2172 }
2173}
2174
2175impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
2176where
2177 FieldsShape<FieldIdx>: fmt::Debug,
2178 Variants<FieldIdx, VariantIdx>: fmt::Debug,
2179{
2180 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2181 let LayoutData {
2185 size,
2186 align,
2187 backend_repr,
2188 fields,
2189 largest_niche,
2190 uninhabited,
2191 variants,
2192 max_repr_align,
2193 unadjusted_abi_align,
2194 randomization_seed,
2195 } = self;
2196 f.debug_struct("Layout")
2197 .field("size", size)
2198 .field("align", align)
2199 .field("backend_repr", backend_repr)
2200 .field("fields", fields)
2201 .field("largest_niche", largest_niche)
2202 .field("uninhabited", uninhabited)
2203 .field("variants", variants)
2204 .field("max_repr_align", max_repr_align)
2205 .field("unadjusted_abi_align", unadjusted_abi_align)
2206 .field("randomization_seed", randomization_seed)
2207 .finish()
2208 }
2209}
2210
/// The kind of pointer a value is, used to decide which attributes
/// (aliasing, dereferenceability, …) may be assumed about it.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// A shared reference. `frozen` presumably means no interior mutability
    /// behind it — TODO(review): confirm against the producer of this value.
    SharedRef { frozen: bool },
    /// A mutable reference; `unpin` records whether the pointee is `Unpin`.
    MutableRef { unpin: bool },
    /// An owning `Box`; `global` presumably records whether it uses the
    /// global allocator — TODO(review): confirm.
    Box { unpin: bool, global: bool },
}

2222#[derive(#[automatically_derived]
impl ::core::marker::Copy for PointeeInfo { }Copy, #[automatically_derived]
impl ::core::clone::Clone for PointeeInfo {
#[inline]
fn clone(&self) -> PointeeInfo {
let _: ::core::clone::AssertParamIsClone<Option<PointerKind>>;
let _: ::core::clone::AssertParamIsClone<Size>;
let _: ::core::clone::AssertParamIsClone<Align>;
*self
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for PointeeInfo {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field3_finish(f, "PointeeInfo",
"safe", &self.safe, "size", &self.size, "align", &&self.align)
}
}Debug)]
2228pub struct PointeeInfo {
2229 pub safe: Option<PointerKind>,
2231 pub size: Size,
2238 pub align: Align,
2240}
2241
2242impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2243 #[inline]
2245 pub fn is_unsized(&self) -> bool {
2246 self.backend_repr.is_unsized()
2247 }
2248
2249 #[inline]
2250 pub fn is_sized(&self) -> bool {
2251 self.backend_repr.is_sized()
2252 }
2253
2254 pub fn is_1zst(&self) -> bool {
2256 self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
2257 }
2258
2259 pub fn is_scalable_vector(&self) -> bool {
2261 #[allow(non_exhaustive_omitted_patterns)] match self.backend_repr {
BackendRepr::SimdScalableVector { .. } => true,
_ => false,
}matches!(self.backend_repr, BackendRepr::SimdScalableVector { .. })
2262 }
2263
2264 pub fn scalable_vector_element_count(&self) -> Option<u64> {
2266 match self.backend_repr {
2267 BackendRepr::SimdScalableVector { count, .. } => Some(count),
2268 _ => None,
2269 }
2270 }
2271
2272 pub fn is_zst(&self) -> bool {
2277 match self.backend_repr {
2278 BackendRepr::Scalar(_)
2279 | BackendRepr::ScalarPair(..)
2280 | BackendRepr::SimdScalableVector { .. }
2281 | BackendRepr::SimdVector { .. } => false,
2282 BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
2283 }
2284 }
2285
2286 pub fn eq_abi(&self, other: &Self) -> bool {
2292 self.size == other.size
2296 && self.is_sized() == other.is_sized()
2297 && self.backend_repr.eq_up_to_validity(&other.backend_repr)
2298 && self.backend_repr.is_bool() == other.backend_repr.is_bool()
2299 && self.align.abi == other.align.abi
2300 && self.max_repr_align == other.max_repr_align
2301 && self.unadjusted_abi_align == other.unadjusted_abi_align
2302 }
2303}
2304
2305#[derive(#[automatically_derived]
impl ::core::marker::Copy for StructKind { }Copy, #[automatically_derived]
impl ::core::clone::Clone for StructKind {
#[inline]
fn clone(&self) -> StructKind {
let _: ::core::clone::AssertParamIsClone<Size>;
let _: ::core::clone::AssertParamIsClone<Align>;
*self
}
}Clone, #[automatically_derived]
impl ::core::fmt::Debug for StructKind {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
StructKind::AlwaysSized =>
::core::fmt::Formatter::write_str(f, "AlwaysSized"),
StructKind::MaybeUnsized =>
::core::fmt::Formatter::write_str(f, "MaybeUnsized"),
StructKind::Prefixed(__self_0, __self_1) =>
::core::fmt::Formatter::debug_tuple_field2_finish(f,
"Prefixed", __self_0, &__self_1),
}
}
}Debug)]
2306pub enum StructKind {
2307 AlwaysSized,
2309 MaybeUnsized,
2311 Prefixed(Size, Align),
2313}
2314
/// Error produced when parsing an ABI name from a string fails.
#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// The ABI name was not recognized.
    Unknown,
    /// The ABI name did not explicitly state its unwind behavior —
    /// NOTE(review): inferred from the variant name, confirm at use site.
    NoExplicitUnwind,
}