Auto merge of rust-lang#137446 - matthiaskrgr:rollup-16moy6v, r=matthiaskrgr

Rollup of 6 pull requests

Successful merges:

 - rust-lang#135501 (Inject `compiler_builtins` during postprocessing and ensure it is made private)
 - rust-lang#137121 (stabilize `(const_)ptr_sub_ptr`)
 - rust-lang#137180 (Give `global_asm` a fake body to store typeck results, represent `sym fn` as a hir expr to fix `sym fn` operands with lifetimes)
 - rust-lang#137256 (compiler: untangle SIMD alignment assumptions)
 - rust-lang#137383 (stabilize `unsigned_is_multiple_of`)
 - rust-lang#137415 (Remove invalid suggestion of into_iter for extern macro)

r? `@ghost`
`@rustbot` modify labels: rollup
bors committed Feb 23, 2025
2 parents 0769736 + e780b89 commit bca5f37
Showing 94 changed files with 703 additions and 538 deletions.
2 changes: 1 addition & 1 deletion compiler/rustc_abi/src/callconv/reg.rs
@@ -57,7 +57,7 @@ impl Reg {
128 => dl.f128_align.abi,
_ => panic!("unsupported float: {self:?}"),
},
RegKind::Vector => dl.vector_align(self.size).abi,
RegKind::Vector => dl.llvmlike_vector_align(self.size).abi,
}
}
}
37 changes: 23 additions & 14 deletions compiler/rustc_abi/src/layout.rs
@@ -310,10 +310,10 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut max_repr_align = repr.align;

// If all the non-ZST fields have the same ABI and union ABI optimizations aren't
// disabled, we can use that common ABI for the union as a whole.
// If all the non-ZST fields have the same repr and union repr optimizations aren't
// disabled, we can use that common repr for the union as a whole.
struct AbiMismatch;
let mut common_non_zst_abi_and_align = if repr.inhibits_union_abi_opt() {
let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() {
// Can't optimize
Err(AbiMismatch)
} else {
@@ -337,14 +337,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
continue;
}

if let Ok(common) = common_non_zst_abi_and_align {
if let Ok(common) = common_non_zst_repr_and_align {
// Discard valid range information and allow undef
let field_abi = field.backend_repr.to_union();

if let Some((common_abi, common_align)) = common {
if common_abi != field_abi {
// Different fields have different ABI: disable opt
common_non_zst_abi_and_align = Err(AbiMismatch);
common_non_zst_repr_and_align = Err(AbiMismatch);
} else {
// Fields with the same non-Aggregate ABI should also
// have the same alignment
@@ -357,7 +357,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
}
} else {
// First non-ZST field: record its ABI and alignment
common_non_zst_abi_and_align = Ok(Some((field_abi, field.align.abi)));
common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
}
}
}
@@ -376,16 +376,25 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

// If all non-ZST fields have the same ABI, we may forward that ABI
// for the union as a whole, unless otherwise inhibited.
let abi = match common_non_zst_abi_and_align {
let backend_repr = match common_non_zst_repr_and_align {
Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
Ok(Some((abi, _))) => {
if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
// Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
Ok(Some((repr, _))) => match repr {
// Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
if repr.scalar_align(dl).unwrap() != align.abi =>
{
BackendRepr::Memory { sized: true }
} else {
abi
}
}
// Vectors require at least element alignment, else disable the opt
BackendRepr::Vector { element, count: _ } if element.align(dl).abi > align.abi => {
BackendRepr::Memory { sized: true }
}
// the alignment tests passed and we can use this
BackendRepr::Scalar(..)
| BackendRepr::ScalarPair(..)
| BackendRepr::Vector { .. }
| BackendRepr::Memory { .. } => repr,
},
};

let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
@@ -400,7 +409,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
Ok(LayoutData {
variants: Variants::Single { index: only_variant_idx },
fields: FieldsShape::Union(union_field_count),
backend_repr: abi,
backend_repr,
largest_niche: None,
uninhabited: false,
align,
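For illustration, a minimal sketch of the decision the layout.rs hunk above encodes, using a simplified stand-in for `BackendRepr` (all names below are hypothetical, not part of the patch): forward the fields' common repr to the union only when its alignment is compatible with the union's alignment, otherwise fall back to plain memory.

```rust
// Simplified stand-in for BackendRepr; names here are hypothetical.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Repr {
    Scalar { align: u64 },
    Vector { element_align: u64 },
    Memory,
}

// Mirrors the decision in the hunk above: forward the common non-ZST field
// repr to the union only when the alignment checks pass.
fn union_repr(common_field_repr: Option<Repr>, union_align: u64) -> Repr {
    match common_field_repr {
        // Mismatched alignment (e.g. the union is #[repr(packed)]): disable the opt.
        Some(Repr::Scalar { align }) if align != union_align => Repr::Memory,
        // Vectors require at least element alignment, else disable the opt.
        Some(Repr::Vector { element_align }) if element_align > union_align => Repr::Memory,
        // The alignment tests passed: use the common repr.
        Some(repr) => repr,
        // No non-ZST fields, or the fields disagreed.
        None => Repr::Memory,
    }
}

fn main() {
    // A packed union whose fields share a 4-byte-aligned scalar repr loses the opt.
    assert_eq!(union_repr(Some(Repr::Scalar { align: 4 }), 1), Repr::Memory);
    // When the alignments agree, the scalar repr is forwarded.
    assert_eq!(union_repr(Some(Repr::Scalar { align: 4 }), 4), Repr::Scalar { align: 4 });
}
```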
93 changes: 49 additions & 44 deletions compiler/rustc_abi/src/lib.rs
Expand Up @@ -408,16 +408,21 @@ impl TargetDataLayout {
}
}

/// psABI-mandated alignment for a vector type, if any
#[inline]
pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
for &(size, align) in &self.vector_align {
if size == vec_size {
return align;
}
}
// Default to natural alignment, which is what LLVM does.
// That is, use the size, rounded up to a power of 2.
AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAndPrefAlign> {
self.vector_align
.iter()
.find(|(size, _align)| *size == vec_size)
.map(|(_size, align)| *align)
}

/// an alignment resembling the one LLVM would pick for a vector
#[inline]
pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
self.cabi_vector_align(vec_size).unwrap_or(AbiAndPrefAlign::new(
Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
))
}
}
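For illustration, a minimal sketch of the fallback rule `llvmlike_vector_align` uses when the target's `vector_align` table has no entry for the requested size; the helper name below is hypothetical.

```rust
// LLVM-like "natural" vector alignment: the vector's size in bytes,
// rounded up to the next power of two.
fn natural_vector_align_bytes(vec_size_bytes: u64) -> u64 {
    vec_size_bytes.next_power_of_two()
}

fn main() {
    // A 12-byte vector (e.g. three f32 lanes) is aligned to 16 bytes.
    assert_eq!(natural_vector_align_bytes(12), 16);
    // A 16-byte vector is already a power of two and stays at 16 bytes.
    assert_eq!(natural_vector_align_bytes(16), 16);
}
```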

@@ -810,20 +815,19 @@ impl Align {
self.bits().try_into().unwrap()
}

/// Computes the best alignment possible for the given offset
/// (the largest power of two that the offset is a multiple of).
/// Obtain the greatest factor of `size` that is an alignment
/// (the largest power of two the Size is a multiple of).
///
/// N.B., for an offset of `0`, this happens to return `2^64`.
/// Note that all numbers are factors of 0
#[inline]
pub fn max_for_offset(offset: Size) -> Align {
Align { pow2: offset.bytes().trailing_zeros() as u8 }
pub fn max_aligned_factor(size: Size) -> Align {
Align { pow2: size.bytes().trailing_zeros() as u8 }
}

/// Lower the alignment, if necessary, such that the given offset
/// is aligned to it (the offset is a multiple of the alignment).
/// Reduces Align to an aligned factor of `size`.
#[inline]
pub fn restrict_for_offset(self, offset: Size) -> Align {
self.min(Align::max_for_offset(offset))
pub fn restrict_for_offset(self, size: Size) -> Align {
self.min(Align::max_aligned_factor(size))
}
}
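A quick sketch of the arithmetic behind `max_aligned_factor` (the helper below is hypothetical): the largest power of two dividing `size` is `1 << size.trailing_zeros()`, and because `0u64.trailing_zeros()` is 64, a size of 0 yields 2^64, matching the note that every number is a factor of 0.

```rust
// Largest power of two that divides `size_bytes`, in bytes.
// Returned as u128 so the size-0 case (2^64) does not overflow.
fn max_aligned_factor_bytes(size_bytes: u64) -> u128 {
    1u128 << size_bytes.trailing_zeros()
}

fn main() {
    assert_eq!(max_aligned_factor_bytes(24), 8); // 24 = 8 * 3
    assert_eq!(max_aligned_factor_bytes(10), 2); // 10 = 2 * 5
    assert_eq!(max_aligned_factor_bytes(0), 1u128 << 64); // every power of two divides 0
}
```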

@@ -1455,37 +1459,38 @@ impl BackendRepr {
matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
}

/// Returns the fixed alignment of this ABI, if any is mandated.
pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
Some(match *self {
BackendRepr::Scalar(s) => s.align(cx),
BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
BackendRepr::Vector { element, count } => {
cx.data_layout().vector_align(element.size(cx) * count)
}
BackendRepr::Memory { .. } => return None,
})
/// The psABI alignment for a `Scalar` or `ScalarPair`
///
/// `None` for other variants.
pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
match *self {
BackendRepr::Scalar(s) => Some(s.align(cx).abi),
BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
// The align of a Vector can vary in surprising ways
BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None,
}
}

/// Returns the fixed size of this ABI, if any is mandated.
pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
Some(match *self {
BackendRepr::Scalar(s) => {
// No padding in scalars.
s.size(cx)
}
/// The psABI size for a `Scalar` or `ScalarPair`
///
/// `None` for other variants
pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
match *self {
// No padding in scalars.
BackendRepr::Scalar(s) => Some(s.size(cx)),
// May have some padding between the pair.
BackendRepr::ScalarPair(s1, s2) => {
// May have some padding between the pair.
let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
(field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
let size = (field2_offset + s2.size(cx)).align_to(
self.scalar_align(cx)
// We absolutely must have an answer here or everything is FUBAR.
.unwrap(),
);
Some(size)
}
BackendRepr::Vector { element, count } => {
// No padding in vectors, except possibly for trailing padding
// to make the size a multiple of align (e.g. for vectors of size 3).
(element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
}
BackendRepr::Memory { .. } => return None,
})
// The size of a Vector can vary in surprising ways
BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None,
}
}

/// Discard validity range information and allow undef.
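To make the `ScalarPair` size rule above concrete, a standalone sketch (helper names are hypothetical): the second scalar is placed at the first offset aligned for it, and the total is rounded up to the pair's alignment, the larger of the two scalar alignments.

```rust
// Round `offset` up to the next multiple of `align` (align must be a power of two).
fn align_to(offset: u64, align: u64) -> u64 {
    (offset + align - 1) & !(align - 1)
}

// Size of a ScalarPair-like layout: the second field starts at the first
// offset aligned for it, and the total is padded to the pair's alignment.
fn scalar_pair_size(size1: u64, align1: u64, size2: u64, align2: u64) -> u64 {
    let field2_offset = align_to(size1, align2);
    align_to(field2_offset + size2, align1.max(align2))
}

fn main() {
    // A pair like (i8, i32): the i32 starts at offset 4, total size is 8.
    assert_eq!(scalar_pair_size(1, 1, 4, 4), 8);
    // A pair like (i32, i8): the i8 sits at offset 4, then padding to 8.
    assert_eq!(scalar_pair_size(4, 4, 1, 1), 8);
}
```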
18 changes: 2 additions & 16 deletions compiler/rustc_ast_lowering/src/asm.rs
@@ -1,13 +1,12 @@
use std::collections::hash_map::Entry;
use std::fmt::Write;

use rustc_ast::ptr::P;
use rustc_ast::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_session::parse::feature_err;
use rustc_span::{Span, kw, sym};
use rustc_span::{Span, sym};
use rustc_target::asm;

use super::LoweringContext;
@@ -230,20 +229,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
tokens: None,
};

// Wrap the expression in an AnonConst.
let parent_def_id = self.current_hir_id_owner.def_id;
let node_id = self.next_node_id();
self.create_def(
parent_def_id,
node_id,
kw::Empty,
DefKind::AnonConst,
*op_sp,
);
let anon_const = AnonConst { id: node_id, value: P(expr) };
hir::InlineAsmOperand::SymFn {
anon_const: self.lower_anon_const_to_anon_const(&anon_const),
}
hir::InlineAsmOperand::SymFn { expr: self.lower_expr(&expr) }
}
}
InlineAsmOperand::Label { block } => {
7 changes: 6 additions & 1 deletion compiler/rustc_ast_lowering/src/item.rs
@@ -251,7 +251,12 @@ impl<'hir> LoweringContext<'_, 'hir> {
.arena
.alloc_from_iter(fm.items.iter().map(|x| self.lower_foreign_item_ref(x))),
},
ItemKind::GlobalAsm(asm) => hir::ItemKind::GlobalAsm(self.lower_inline_asm(span, asm)),
ItemKind::GlobalAsm(asm) => {
let asm = self.lower_inline_asm(span, asm);
let fake_body =
self.lower_body(|this| (&[], this.expr(span, hir::ExprKind::InlineAsm(asm))));
hir::ItemKind::GlobalAsm { asm, fake_body }
}
ItemKind::TyAlias(box TyAlias { generics, where_clauses, ty, .. }) => {
// We lower
//
29 changes: 24 additions & 5 deletions compiler/rustc_borrowck/src/universal_regions.rs
@@ -126,6 +126,11 @@ pub(crate) enum DefiningTy<'tcx> {
/// The MIR represents an inline const. The signature has no inputs and a
/// single return value found via `InlineConstArgs::ty`.
InlineConst(DefId, GenericArgsRef<'tcx>),

// Fake body for a global asm. Not particularly useful or interesting,
// but we need it so we can properly store the typeck results of the asm
// operands, which aren't associated with a body otherwise.
GlobalAsm(DefId),
}

impl<'tcx> DefiningTy<'tcx> {
@@ -138,9 +143,10 @@ impl<'tcx> DefiningTy<'tcx> {
DefiningTy::Closure(_, args) => args.as_closure().upvar_tys(),
DefiningTy::CoroutineClosure(_, args) => args.as_coroutine_closure().upvar_tys(),
DefiningTy::Coroutine(_, args) => args.as_coroutine().upvar_tys(),
DefiningTy::FnDef(..) | DefiningTy::Const(..) | DefiningTy::InlineConst(..) => {
ty::List::empty()
}
DefiningTy::FnDef(..)
| DefiningTy::Const(..)
| DefiningTy::InlineConst(..)
| DefiningTy::GlobalAsm(_) => ty::List::empty(),
}
}

@@ -152,7 +158,10 @@ impl<'tcx> DefiningTy<'tcx> {
DefiningTy::Closure(..)
| DefiningTy::CoroutineClosure(..)
| DefiningTy::Coroutine(..) => 1,
DefiningTy::FnDef(..) | DefiningTy::Const(..) | DefiningTy::InlineConst(..) => 0,
DefiningTy::FnDef(..)
| DefiningTy::Const(..)
| DefiningTy::InlineConst(..)
| DefiningTy::GlobalAsm(_) => 0,
}
}

@@ -171,7 +180,8 @@ impl<'tcx> DefiningTy<'tcx> {
| DefiningTy::Coroutine(def_id, ..)
| DefiningTy::FnDef(def_id, ..)
| DefiningTy::Const(def_id, ..)
| DefiningTy::InlineConst(def_id, ..) => def_id,
| DefiningTy::InlineConst(def_id, ..)
| DefiningTy::GlobalAsm(def_id) => def_id,
}
}
}
@@ -411,6 +421,7 @@ impl<'tcx> UniversalRegions<'tcx> {
tcx.def_path_str_with_args(def_id, args),
));
}
DefiningTy::GlobalAsm(_) => unreachable!(),
}
}

@@ -633,6 +644,8 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
DefiningTy::InlineConst(self.mir_def.to_def_id(), args)
}
}

BodyOwnerKind::GlobalAsm => DefiningTy::GlobalAsm(self.mir_def.to_def_id()),
}
}

@@ -666,6 +679,8 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
}

DefiningTy::FnDef(_, args) | DefiningTy::Const(_, args) => args,

DefiningTy::GlobalAsm(_) => ty::List::empty(),
};

let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
@@ -802,6 +817,10 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
let ty = args.as_inline_const().ty();
ty::Binder::dummy(tcx.mk_type_list(&[ty]))
}

DefiningTy::GlobalAsm(def_id) => {
ty::Binder::dummy(tcx.mk_type_list(&[tcx.type_of(def_id).instantiate_identity()]))
}
};

// FIXME(#129952): We probably want a more principled approach here.