use std::collections::BTreeSet;
use std::fmt::{self, Write};
use std::ops::Deref;
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::{debug, trace};

use crate::{
    AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, NumScalableVectors, Primitive, ReprOptions, Scalar, Size,
    StructKind, TagEncoding, TargetDataLayout, Variants, WrappingRange,
};

mod coroutine;
mod simple;

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{Layout, TyAbiInterface, TyAndLayout};

rustc_index::newtype_index! {
    /// The *source-order* index of a field in a variant.
    ///
    /// This is how most code after type checking refers to fields, rather than
    /// using names (as names have hygiene complications and more complex lookup).
    ///
    /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
    /// (It is for `repr(C)` `struct`s, however.)
    ///
    /// For example, in the following types,
    /// ```rust
    /// # enum Never {}
    /// # #[repr(u16)]
    /// enum Demo1 {
    ///     Variant0 { a: Never, b: i32 } = 100,
    ///     Variant1 { c: u8, d: u64 } = 10,
    /// }
    /// struct Demo2 { e: u8, f: u16, g: u8 }
    /// ```
    /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
    /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
    /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
    #[stable_hash]
    #[encodable]
    #[orderable]
    #[gate_rustc_only]
    pub struct FieldIdx {}
}

impl FieldIdx {
    /// The second field, at index 1.
    ///
    /// For use alongside [`FieldIdx::ZERO`], particularly with scalar pairs.
    pub const ONE: FieldIdx = FieldIdx::from_u32(1);
}

rustc_index::newtype_index! {
    /// The *source-order* index of a variant in a type.
    ///
    /// For enums, these are always `0..variant_count`, regardless of any
    /// custom discriminants that may have been defined, and including any
    /// variants that may end up uninhabited due to field types. (Some of the
    /// variants may not be present in a monomorphized ABI [`Variants`], but
    /// those skipped variants are always counted when determining the *index*.)
    ///
    /// `struct`s, `tuple`s, and `union`s are considered to have a single variant
    /// with variant index zero, aka [`FIRST_VARIANT`].
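    ///
    /// For example, `Option<T>` has `None` at `VariantIdx(0)` and `Some` at
    /// `VariantIdx(1)`, matching their declaration order.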
    #[stable_hash]
    #[encodable]
    #[orderable]
    #[gate_rustc_only]
    pub struct VariantIdx {
        /// Equivalent to `VariantIdx(0)`.
        const FIRST_VARIANT = 0;
    }
}

// A variant is absent if it's uninhabited and only has ZST fields.
// Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
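//
// For example (a sketch, using the never type `!` as an uninhabited 1-ZST):
// in `enum E { A(!), B(u8) }` the variant `A` is absent, so only `B` needs a
// tag encoding; in `enum E2 { A(!, u32), B(u8) }` the `u32` still needs space,
// so `A` is present (though uninhabited) and only skips the tag encoding.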
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,

    /// The length of a SIMD type is zero.
    ZeroLengthSimdType,

    /// The length of a SIMD type exceeds the maximum number of lanes.
    OversizedSimdType { max_lanes: u64 },

    /// An element type of a SIMD type isn't a primitive.
    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        use LayoutCalculatorError::*;
        match *self {
            UnexpectedUnsized(_) => UnexpectedUnsized(()),
            SizeOverflow => SizeOverflow,
            EmptyUnion => EmptyUnion,
            ReprConflict => ReprConflict,
            ZeroLengthSimdType => ZeroLengthSimdType,
            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

    /// Format an untranslated diagnostic for this type.
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use LayoutCalculatorError::*;
        f.write_str(match self {
            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
            SizeOverflow => "size overflow",
            EmptyUnion => "type is a union with no fields",
            ReprConflict => "type has an invalid repr",
            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
                "invalid simd type definition"
            }
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }
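
    /// Compute the layout of an array-like type (`[T; N]`, or `[T]` when
    /// `count_if_sized` is `None`).
    ///
    /// A worked example of the arithmetic below, assuming a typical data
    /// layout: for `[u16; 3]` the stride is the 2-byte element size, so the
    /// total size is `2 * 3 = 6` bytes with the element's 2-byte alignment.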
    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
        &self,
        element: &LayoutData<FieldIdx, VariantIdx>,
        count_if_sized: Option<u64>, // None for slices
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let count = count_if_sized.unwrap_or(0);
        let size =
            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: element.size, count },
            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
            largest_niche: element.largest_niche.filter(|_| count != 0),
            uninhabited: element.uninhabited && count != 0,
            align: element.align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: element.align.abi,
            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }

    pub fn scalable_vector_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
        number_of_vectors: NumScalableVectors,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        vector_type_layout(
            SimdVectorKind::Scalable(number_of_vectors),
            self.cx.data_layout(),
            element,
            count,
        )
    }

    pub fn simd_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
        repr_packed: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        let kind = if repr_packed { SimdVectorKind::PackedFixed } else { SimdVectorKind::Fixed };
        vector_type_layout(kind, self.cx.data_layout(), element, count)
    }

    /// Compute the layout for a coroutine.
    ///
    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
    pub fn coroutine<
        'a,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
        VariantIdx: Idx,
        FieldIdx: Idx,
        LocalIdx: Idx,
    >(
        &self,
        local_layouts: &IndexSlice<LocalIdx, F>,
        prefix_layouts: IndexVec<FieldIdx, F>,
        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
        tag_to_layout: impl Fn(Scalar) -> F,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        coroutine::layout(
            self,
            local_layouts,
            prefix_layouts,
            variant_fields,
            storage_conflicts,
            tag_to_layout,
        )
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                            layout: {}\n\
                            alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                if !repr.inhibit_enum_layout_opt() && absent(v) { None } else { Some(i) }
            });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => VariantIdx::new(0),
        };

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_special_no_niche,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(repr, variants, discr_range_of_repr, discriminants)
        }
    }

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same repr and union repr optimizations aren't
        // disabled, we can use that common repr for the union as a whole.
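        // For example (a sketch): in `union U { a: u32, b: NonZeroU32 }` both
        // fields lower to the same 4-byte integer scalar once their valid-range
        // information is discarded via `to_union()`, so the union itself can
        // keep a scalar repr instead of falling back to `Memory`.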
        struct AbiMismatch;
        let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_repr_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_repr_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(pack);
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let backend_repr = match common_non_zst_repr_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((repr, _))) => match repr {
                // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
                    if repr.scalar_align(dl).unwrap() != align =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // Vectors require at least element alignment, else disable the opt
                BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => {
                    BackendRepr::Memory { sized: true }
                }
                // the alignment tests passed and we can use this
                BackendRepr::Scalar(..)
                | BackendRepr::ScalarPair(..)
                | BackendRepr::SimdVector { .. }
                | BackendRepr::SimdScalableVector { .. }
                | BackendRepr::Memory { .. } => repr,
            },
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr,
            largest_niche: None,
            uninhabited: false,
            align: AbiAlign::new(align),
            size: size.align_to(align),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }

    /// Single-variant enums are just structs, if you think about it.
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };

        if is_special_no_niche {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::SimdVector { element, .. }
                | BackendRepr::SimdScalableVector { element, .. } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        Ok(st)
    }

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

        let calculate_niche_filling_layout = || -> Option<LayoutData<FieldIdx, VariantIdx>> {
            if repr.inhibit_enum_layout_opt() {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align.abi);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;

            // Use the largest niche in the largest variant.
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align);
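
            // For example, in `Option<&T>` the largest variant `Some` holds a
            // reference whose forbidden value 0 is its niche: `None` is encoded
            // as the all-zero bit pattern and no separate tag field is needed.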

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align.abi;

            let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
            let abi = if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: FieldIdx::new(0),
                    variants: variant_layouts,
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    in_memory_order: [FieldIdx::new(0)].into(),
                },
                backend_repr: abi,
                largest_niche,
                uninhabited,
                size,
                align: AbiAlign::new(align),
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(layout)
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let discr_type = repr.discr_type();
        let discr_int = Integer::from_attr(dl, discr_type);
        // Because we can only represent one range of valid values, we'll look for the
        // largest range of invalid values and pick everything else as the range of valid
        // values.

        // First we need to sort the possible discriminant values so that we can look for the largest gap:
        let valid_discriminants: BTreeSet<i128> = discriminants
            .filter(|&(i, _)| repr.c() || variants[i].iter().all(|f| !f.is_uninhabited()))
            .map(|(_, val)| {
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    // FIXME: do this at the discriminant iterator creation sites
                    discr_int.size().sign_extend(val as u128)
                } else {
                    val
                }
            })
            .collect();
        trace!(?valid_discriminants);
        let discriminants = valid_discriminants.iter().copied();
        //let next_discriminants = discriminants.clone().cycle().skip(1);
        let next_discriminants =
            discriminants.clone().chain(valid_discriminants.first().copied()).skip(1);
        // Iterate over pairs of each discriminant together with the next one.
        // Since they were sorted, we can now compute the niche sizes and pick the largest.
        let discriminants = discriminants.zip(next_discriminants);
        let largest_niche = discriminants.max_by_key(|&(start, end)| {
            trace!(?start, ?end);
            // If this is a wraparound range, the niche size is `MAX - abs(diff)`, as the diff
            // between the two end points is actually the size of the valid discriminants.
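            // Worked example with a u8 tag: valid discriminants {0, 1, 200}
            // produce the pairs (0, 1), (1, 200), and the wraparound pair
            // (200, 0), whose distances are 1, 199, and 255 - 200 = 55. The
            // largest gap is (1, 200), so the unused values strictly between
            // 1 and 200 form the niche.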
            let dist = if start > end {
                // Overflow can happen for 128 bit discriminants if `end` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                let dist = start.wrapping_sub(end);
                if discr_type.is_signed() {
                    discr_int.signed_max().wrapping_sub(dist) as u128
                } else {
                    discr_int.size().unsigned_int_max() - dist as u128
                }
            } else {
                // Overflow can happen for 128 bit discriminants if `start` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                end.wrapping_sub(start) as u128
            };
            trace!(?dist);
            dist
        });
        trace!(?largest_niche);

        // `max` is the last valid discriminant before the largest niche
        // `min` is the first valid discriminant after the largest niche
        let (max, min) = largest_niche
            // We might have no inhabited variants, so pretend there's at least one.
            .unwrap_or((0, 0));
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::discr_range_of_repr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align.abi);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on values discriminant can take on). Mostly
            // because this discriminant will be loaded, and then stored into variable of
            // type calculated by typeck. Consider such case (a bug): typeck decided on
            // byte-sized discriminant, but layout thinks we need a 16-bit to store all
            // discriminant values. That would be a bug, because then, in codegen, in order
            // to store this 16-bit discriminant into 8-bit sized temporary some of the
            // space necessary to represent would have to be discarded (or layout is wrong
            // on thinking it needs 16 bits)
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make discr type however large (as an optimisation)
            // after this point – we’ll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.
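        //
        // For example (a sketch): in `enum E { A(u64), B(u64) }` a 1-byte tag
        // would suffice, but each variant's payload starts at an 8-aligned
        // offset, so the tag can be widened to fill the padding before it.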

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
        let mut abi = BackendRepr::Memory { sized: true };

        let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited());
        if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
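            // For example (a sketch): `enum E { A(u32), B(i32) }` can use
            // `ScalarPair(tag, int)`, since both payloads are 4-byte integers
            // at the same offset; the signedness is arbitrarily taken from the
            // first variant.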
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZST here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    let new_prim = match (old_prim, prim) {
                        // Allow all identical primitives.
                        (x, y) if x == y => x,
                        // Allow integers of the same size with differing signedness.
                        // We arbitrarily choose the signedness of the first variant.
                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
                        // Allow integers mixed with pointers of the same layout.
                        // We must represent this using a pointer, to avoid
                        // roundtripping pointers through ptrtoint/inttoptr.
                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
                        {
                            p
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    // We may be updating the primitive here, for example from int->ptr.
                    common_prim = Some((new_prim, common_offset));
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    let size = prim.size(dl);
                    assert!(size.bits() <= 128);
                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair =
                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                        assert_eq!(in_memory_order.raw, [FieldIdx::new(0), FieldIdx::new(1)]);
                        offsets
                    }
                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
                };
                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::new(1)] == *offset
                    && align == pair.align.abi
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.backend_repr;
                }
            }
        }
10421043// If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1044 // variants to ensure they are consistent. This is because a downcast is
1045 // semantically a NOP, and thus should not affect layout.
1046if #[allow(non_exhaustive_omitted_patterns)] match abi {
BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) => true,
_ => false,
}matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
1047for variant in &mut layout_variants {
1048// We only do this for variants with fields; the others are not accessed anyway.
1049 // Also do not overwrite any already existing "clever" ABIs.
1050if variant.fields.count() > 0
1051 && #[allow(non_exhaustive_omitted_patterns)] match variant.backend_repr {
BackendRepr::Memory { .. } => true,
_ => false,
}matches!(variant.backend_repr, BackendRepr::Memory { .. })1052 {
1053 variant.backend_repr = abi;
1054// Also need to bump up the size and alignment, so that the entire value fits
1055 // in here.
1056variant.size = cmp::max(variant.size, size);
1057 variant.align.abi = cmp::max(variant.align.abi, align);
1058 }
1059 }
1060 }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let combined_seed = layout_variants
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        let tagged_layout = LayoutData {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: FieldIdx::new(0),
                variants: layout_variants,
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                in_memory_order: [FieldIdx::new(0)].into(),
            },
            largest_niche,
            uninhabited,
            backend_repr: abi,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        };

        let best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |l: &LayoutData<FieldIdx, VariantIdx>| {
                    l.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.size.cmp(&nl.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        Ok(best_layout)
    }

    fn univariant_biased<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
        niche_bias: NicheBias,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
        let mut in_memory_order: IndexVec<u32, FieldIdx> = fields.indices().collect();
        let optimize_field_order = !repr.inhibit_struct_field_reordering();
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut in_memory_order.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];
        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
        let field_seed = fields_excluding_tail
            .iter()
            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));

        if optimize_field_order && fields.len() > 1 {
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    use rand::SeedableRng;
                    use rand::seq::SliceRandom;
                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to
                    // randomize field ordering.
                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
                    );

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
1157// To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
1158 // not depend on the layout of the tail.
1159let max_field_align =
1160fields_excluding_tail.iter().map(|f| f.align.bytes()).max().unwrap_or(1);
1161let largest_niche_size = fields_excluding_tail1162 .iter()
1163 .filter_map(|f| f.largest_niche)
1164 .map(|n| n.available(dl))
1165 .max()
1166 .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared
                    // with each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is
                    // okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is
                        // an integer multiple of align, except for ZSTs.
                        let align = layout.align.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to
                                // bump the array to the front in the first case (for aligned
                                // loads) but keep the bool in front in the second case for its
                                // niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct then for
                                // `A((u8, u8, u8, bool), (u8, bool, u8))` we want to keep the
                                // first tuple in the align-1 group because its bool can be moved
                                // closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };
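                // Worked example (illustrative, ignoring the niche-bias adjustment above):
                // without `pack`, the key is `log2` of the effective alignment, letting
                // size stand in for alignment:
                //     u32      -> max(4, 4).trailing_zeros() == 2
                //     [u8; 6]  -> max(1, 6).trailing_zeros() == 1  (grouped with `u16`)
                //     bool     -> max(1, 1).trailing_zeros() == 0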

                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutData` only exposes a single niche so sorting is
                        // usually sufficient to get one niche into the preferred position. If
                        // it ever supported multiple niches then a more advanced pick-and-pack
                        // approach could provide better results. But even for the single-niche
                        // case it's not optimal. E.g. for `A(u32, (bool, u8), u16)` it would be
                        // possible to move the bool to the front but it would require packing
                        // the tuple together with the u16 to build a 4-byte group so that the
                        // u32 can be placed after it without padding. This kind of packing
                        // can't be achieved by sorting.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let field_size = f.size.bytes();
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            let niche_size_key = match niche_bias {
                                // Large niche first.
                                NicheBias::Start => !niche_size,
                                // Large niche last.
                                NicheBias::End => niche_size,
                            };
                            let inner_niche_offset_key = match niche_bias {
                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
                                NicheBias::End => f.largest_niche.map_or(0, |n| {
                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                                }),
                            };

                            (
                                // Place the largest alignment groups first.
                                cmp::Reverse(alignment_group_key(f)),
                                // Then prioritize niche placement within an alignment group
                                // according to `niche_bias`.
                                niche_size_key,
                                // Then among fields with equally-sized niches prefer the ones
                                // closer to the start/end of the field.
                                inner_niche_offset_key,
                            )
                        });
                    }
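                    // Worked example (illustrative): with `NicheBias::Start`, the fields of
                    // `struct S { a: u16, b: u32, c: bool }` sort into memory order
                    // `[b, a, c]`: `b` leads the align-4 group, `a` the align-2 group, and
                    // `c` trails, leaving its bool niche available at the end of the struct.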

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix, and put the largest niche in an alignment
                        // group at the end so it can be used as a discriminant in jagged enums.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (alignment_group_key(f), niche_size)
                        });
                    }
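                    // E.g. (illustrative): for fields laid out after a one-byte tag prefix,
                    // ascending alignment such as `[u8, u16, u32]` keeps the same relative
                    // order however large the prefix grows, and the trailing alignment group
                    // can still contribute a niche for a discriminant.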
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                // regardless of the status of `-Z randomize-layout`.
            }
        }

        // `in_memory_order` holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of `in_memory_order` is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in `offsets[5]`.
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(prefix_align);
            offset = prefix_size.align_to(prefix_align);
        }
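        // Illustrative sketch (not part of the algorithm): `offsets` is the inverse view
        // of `in_memory_order`, filled in with the actual byte positions. For a
        // three-field struct whose memory order is `[2, 0, 1]` at byte offsets 0, 4, 8,
        // the loop below effectively computes:
        //
        //     offsets[2] = 0;  // first in memory
        //     offsets[0] = 4;
        //     offsets[1] = 8;  // last in memory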
        for &i in &in_memory_order {
            let field = &fields[i];
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1 << 61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);
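            // Worked example (illustrative): a `u32` field reached at `offset == 6` is
            // bumped to `6.align_to(4) == 8`; under `repr(packed(2))` its effective
            // alignment is `min(4, 2) == 2`, so it stays at offset 6.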

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // If there are several niches of the same size then pick the last one.
                    NicheBias::End => available >= largest_niche_available,
                };
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }
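            // E.g. (illustrative): a `bool` placed at struct offset 8 has a field-local
            // niche offset of 0; `niche.offset += offset` rebases it to 8, so the stored
            // `largest_niche` is always relative to the start of the struct.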

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }

        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be
        // inaccurate.
        let align = align;
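        // E.g. (illustrative): `#[repr(align(8))] struct A(u8);` ends up with `align == 8`
        // but `unadjusted_abi_align == 1`, while `#[repr(packed)] struct P(u32);` has both
        // equal to 1, because packing has already been applied above.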

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        let size = min_size.align_to(align);
        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZSTs here and later check if we are good in terms of alignment.
            // This lets us handle some cases involving aligned ZSTs.
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
                                if optimize_abi =>
                            {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }
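                // E.g. (illustrative): `struct Wrapper(f64);` takes the arm above and
                // inherits `BackendRepr::Scalar(f64)`, so it can be passed in a float
                // register; with `#[repr(C)]`, `optimize_abi` is false and the struct
                // stays `Memory` to match the C ABI for a one-field aggregate.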

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair =
                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                                    assert_eq!(
                                        in_memory_order.raw,
                                        [FieldIdx::new(0), FieldIdx::new(1)]
                                    );
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during enum layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align.abi
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
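        // E.g. (illustrative): `struct P(i32, u16);` matches the canonical
        // `scalar_pair(i32, u16)` placement (offsets 0 and 4, size 8) and is promoted to
        // `ScalarPair`, while `#[repr(C, packed)] struct Q(u8, u32);` places its `u32` at
        // offset 1 instead of the pair's offset 4, so it stays `Memory`.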
        let uninhabited = fields.iter().any(|f| f.is_uninhabited());

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align
                }
            }
        } else {
            unadjusted_abi_align
        };
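        // E.g. (illustrative): `#[repr(transparent)] struct T(u64);` must forward the
        // `unadjusted_abi_align` of the wrapped `u64` so that `T` is passed exactly like
        // a bare `u64`; if every field is a ZST (e.g. only `PhantomData`), there is no
        // wrapped field and the struct's own alignment is used.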

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, in_memory_order },
            backend_repr: abi,
            largest_niche,
            uninhabited,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }

    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
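        // Sample output (illustrative): a struct with a 4-byte `u32` at offset 0 and a
        // `bool` at offset 4 formats as "[o0a4s4] [o4a1s1 n0b7s1] ": offset, alignment,
        // and size per field, plus niche offset, log2(available values), and niche size
        // when the field has a niche.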
        let dl = self.cx.data_layout();
        let mut s = String::new();
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
}

enum SimdVectorKind {
    /// `#[rustc_scalable_vector]`
    Scalable(NumScalableVectors),
    /// `#[repr(simd, packed)]`
    PackedFixed,
    /// `#[repr(simd)]`
    Fixed,
}

fn vector_type_layout<FieldIdx, VariantIdx, F>(
    kind: SimdVectorKind,
    dl: &TargetDataLayout,
    element: F,
    count: u64,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let elt = element.as_ref();
    if count == 0 {
        return Err(LayoutCalculatorError::ZeroLengthSimdType);
    } else if count > crate::MAX_SIMD_LANES {
        return Err(LayoutCalculatorError::OversizedSimdType { max_lanes: crate::MAX_SIMD_LANES });
    }

    let BackendRepr::Scalar(element) = elt.backend_repr else {
        return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
    };

    // Compute the size and alignment of the vector.
    let size =
        elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
    let (repr, align) = match kind {
        SimdVectorKind::Scalable(number_of_vectors) => (
            BackendRepr::SimdScalableVector { element, count, number_of_vectors },
            dl.llvmlike_vector_align(size),
        ),
        // Non-power-of-two vectors have padding up to the next power of two.
        // If we're a packed repr, remove the padding while keeping the alignment as close
        // to a vector as possible.
        SimdVectorKind::PackedFixed if !count.is_power_of_two() => {
            (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
        }
        SimdVectorKind::PackedFixed | SimdVectorKind::Fixed => {
            (BackendRepr::SimdVector { element, count }, dl.llvmlike_vector_align(size))
        }
    };
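    // E.g. (illustrative): a 3-lane `u32` vector has element size 4 and `size == 12`.
    // With `#[repr(simd, packed)]` the alignment becomes `max_aligned_factor(12) == 4`
    // and no padding is added below; plain `#[repr(simd)]` uses the LLVM-like vector
    // alignment instead, typically rounding the type up to 16 bytes.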
    let size = size.align_to(align);

    Ok(LayoutData {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: [Size::ZERO].into(),
            in_memory_order: [FieldIdx::new(0)].into(),
        },
        backend_repr: repr,
        largest_niche: elt.largest_niche,
        uninhabited: false,
        size,
        align: AbiAlign::new(align),
        max_repr_align: None,
        unadjusted_abi_align: elt.align.abi,
        randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
    })
}