Merge commit 'baee6b338b' into sync-from-ra

Laurențiu Nicola 2023-08-07 12:03:15 +03:00
parent 0155385b57
commit aa55ce9567
139 changed files with 4248 additions and 1042 deletions

View file

@ -1186,6 +1186,25 @@ fn pattern_matching_ergonomics() {
);
}
#[test]
fn destructing_assignment() {
check_number(
r#"
//- minicore: add
const fn f(i: &mut u8) -> &mut u8 {
*i += 1;
i
}
const GOAL: u8 = {
let mut i = 4;
_ = f(&mut i);
i
};
"#,
5,
);
}
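The `_ = f(&mut i)` line is a destructuring assignment: the right-hand side is still evaluated and only its result is discarded, without introducing a binding, which is why GOAL ends up as 5. The same behaviour in plain, non-const Rust (illustrative sketch only, not part of the change):

fn f(i: &mut u8) -> &mut u8 {
    *i += 1;
    i
}

fn main() {
    let mut i = 4;
    // `f` runs for its side effect; the returned reference is discarded.
    _ = f(&mut i);
    assert_eq!(i, 5);
}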
#[test]
fn let_else() {
check_number(
@ -1428,14 +1447,14 @@ fn builtin_derive_macro() {
#[derive(Clone)]
struct Y {
field1: i32,
field2: u8,
field2: ((i32, u8), i64),
}
const GOAL: u8 = {
let x = X(2, Z::Foo(Y { field1: 4, field2: 5 }), 8);
let x = X(2, Z::Foo(Y { field1: 4, field2: ((32, 5), 12) }), 8);
let x = x.clone();
let Z::Foo(t) = x.1;
t.field2
t.field2.0 .1
};
"#,
5,
@ -1632,6 +1651,34 @@ const GOAL: i32 = {
);
}
#[test]
fn closure_capture_unsized_type() {
check_number(
r#"
//- minicore: fn, copy, slice, index, coerce_unsized
fn f<T: A>(x: &<T as A>::Ty) -> &<T as A>::Ty {
let c = || &*x;
c()
}
trait A {
type Ty;
}
impl A for i32 {
type Ty = [u8];
}
const GOAL: u8 = {
let k: &[u8] = &[1, 2, 3];
let k = f::<i32>(k);
k[0] + k[1] + k[2]
};
"#,
6,
);
}
#[test]
fn closure_and_impl_fn() {
check_number(
@ -1717,6 +1764,24 @@ fn function_pointer_in_constants() {
);
}
#[test]
fn function_pointer_and_niche_optimization() {
check_number(
r#"
//- minicore: option
const GOAL: i32 = {
let f: fn(i32) -> i32 = |x| x + 2;
let init = Some(f);
match init {
Some(t) => t(3),
None => 222,
}
};
"#,
5,
);
}
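This test leans on the niche optimization: function pointers are never null, so `Option<fn(i32) -> i32>` can encode `None` as the null pointer and needs no separate discriminant. A quick stand-alone check, assuming nothing beyond plain Rust:

use std::mem::size_of;

fn main() {
    // `None` occupies the null niche, so the `Option` stays pointer-sized.
    assert_eq!(size_of::<Option<fn(i32) -> i32>>(), size_of::<fn(i32) -> i32>());
}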
#[test]
fn function_pointer() {
check_number(
@ -2331,11 +2396,14 @@ fn const_loop() {
fn const_transfer_memory() {
check_number(
r#"
const A1: &i32 = &2;
const A2: &i32 = &5;
const GOAL: i32 = *A1 + *A2;
//- minicore: slice, index, coerce_unsized
const A1: &i32 = &1;
const A2: &i32 = &10;
const A3: [&i32; 3] = [&1, &2, &100];
const A4: (i32, &i32) = (1, &1000);
const GOAL: i32 = *A1 + *A2 + *A3[2] + *A4.1;
"#,
7,
1111,
);
}
@ -2521,12 +2589,16 @@ fn const_trait_assoc() {
);
check_number(
r#"
//- minicore: size_of
//- minicore: size_of, fn
//- /a/lib.rs crate:a
use core::mem::size_of;
pub struct S<T>(T);
impl<T> S<T> {
pub const X: usize = core::mem::size_of::<T>();
pub const X: usize = {
let k: T;
let f = || core::mem::size_of::<T>();
f()
};
}
//- /main.rs crate:main deps:a
use a::{S};
@ -2602,9 +2674,9 @@ fn exec_limits() {
}
sum
}
const GOAL: i32 = f(10000);
const GOAL: i32 = f(1000);
"#,
10000 * 10000,
1000 * 1000,
);
}
@ -2651,7 +2723,7 @@ fn unsized_field() {
//- minicore: coerce_unsized, index, slice, transmute
use core::mem::transmute;
struct Slice([u8]);
struct Slice([usize]);
struct Slice2(Slice);
impl Slice2 {
@ -2659,19 +2731,19 @@ fn unsized_field() {
&self.0
}
fn as_bytes(&self) -> &[u8] {
fn as_bytes(&self) -> &[usize] {
&self.as_inner().0
}
}
const GOAL: u8 = unsafe {
let x: &[u8] = &[1, 2, 3];
const GOAL: usize = unsafe {
let x: &[usize] = &[1, 2, 3];
let x: &Slice2 = transmute(x);
let x = x.as_bytes();
x[0] + x[1] + x[2]
x[0] + x[1] + x[2] + x.len() * 100
};
"#,
6,
306,
);
}

View file

@ -251,6 +251,28 @@ fn wrapping_add() {
);
}
#[test]
fn ptr_offset_from() {
check_number(
r#"
//- minicore: index, slice, coerce_unsized
extern "rust-intrinsic" {
pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;
pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize;
}
const GOAL: isize = {
let x = [1, 2, 3, 4, 5i32];
let r1 = -ptr_offset_from(&x[0], &x[4]);
let r2 = ptr_offset_from(&x[3], &x[1]);
let r3 = ptr_offset_from_unsigned(&x[3], &x[0]) as isize;
r3 * 100 + r2 * 10 + r1
};
"#,
324,
);
}
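The expected value 324 follows from pointer distances measured in elements, not bytes; the same arithmetic with the stable `offset_from` method, which this intrinsic backs (illustrative sketch):

fn main() {
    let x = [1, 2, 3, 4, 5i32];
    let r1 = unsafe { -(&x[0] as *const i32).offset_from(&x[4]) }; // -(0 - 4) = 4
    let r2 = unsafe { (&x[3] as *const i32).offset_from(&x[1]) };  //   3 - 1  = 2
    let r3 = unsafe { (&x[3] as *const i32).offset_from(&x[0]) };  //   3 - 0  = 3
    assert_eq!(r3 * 100 + r2 * 10 + r1, 324);
}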
#[test]
fn saturating() {
check_number(
@ -438,6 +460,8 @@ fn atomic() {
pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T;
pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
pub fn atomic_fence_seqcst();
pub fn atomic_singlethreadfence_acqrel();
}
fn should_not_reach() {
@ -452,6 +476,7 @@ fn atomic() {
if (30, true) != atomic_cxchg_release_seqcst(&mut y, 30, 40) {
should_not_reach();
}
atomic_fence_seqcst();
if (40, false) != atomic_cxchg_release_seqcst(&mut y, 30, 50) {
should_not_reach();
}
@ -459,6 +484,7 @@ fn atomic() {
should_not_reach();
}
let mut z = atomic_xsub_seqcst(&mut x, -200);
atomic_singlethreadfence_acqrel();
atomic_xor_seqcst(&mut x, 1024);
atomic_load_seqcst(&x) + z * 3 + atomic_load_seqcst(&y) * 2
};

View file

@ -176,6 +176,7 @@ impl<'a> DeclValidator<'a> {
AttrDefId::ImplId(iid) => Some(iid.lookup(self.db.upcast()).container.into()),
AttrDefId::ExternBlockId(id) => Some(id.lookup(self.db.upcast()).container.into()),
AttrDefId::ExternCrateId(id) => Some(id.lookup(self.db.upcast()).container.into()),
AttrDefId::UseId(id) => Some(id.lookup(self.db.upcast()).container.into()),
// These warnings should not explore macro definitions at all
AttrDefId::MacroId(_) => None,
AttrDefId::AdtId(aid) => match aid {

View file

@ -48,22 +48,15 @@ use crate::{
};
pub trait HirWrite: fmt::Write {
fn start_location_link(&mut self, location: ModuleDefId);
fn end_location_link(&mut self);
fn start_location_link(&mut self, _location: ModuleDefId) {}
fn end_location_link(&mut self) {}
}
// String will ignore link metadata
impl HirWrite for String {
fn start_location_link(&mut self, _: ModuleDefId) {}
fn end_location_link(&mut self) {}
}
impl HirWrite for String {}
// `core::Formatter` will ignore metadata
impl HirWrite for fmt::Formatter<'_> {
fn start_location_link(&mut self, _: ModuleDefId) {}
fn end_location_link(&mut self) {}
}
impl HirWrite for fmt::Formatter<'_> {}
pub struct HirFormatter<'a> {
pub db: &'a dyn HirDatabase,
@ -885,6 +878,13 @@ impl HirDisplay for Ty {
TyKind::FnDef(def, parameters) => {
let def = from_chalk(db, *def);
let sig = db.callable_item_signature(def).substitute(Interner, parameters);
if f.display_target.is_source_code() {
// `FnDef` is anonymous and there's no surface syntax for it. Show it as a
// function pointer type.
return sig.hir_fmt(f);
}
f.start_location_link(def.into());
match def {
CallableDefId::FunctionId(ff) => {

View file

@ -13,6 +13,15 @@
//! to certain types. To record this, we use the union-find implementation from
//! the `ena` crate, which is extracted from rustc.
mod cast;
pub(crate) mod closure;
mod coerce;
mod expr;
mod mutability;
mod pat;
mod path;
pub(crate) mod unify;
use std::{convert::identity, ops::Index};
use chalk_ir::{
@ -60,15 +69,8 @@ pub use coerce::could_coerce;
#[allow(unreachable_pub)]
pub use unify::could_unify;
pub(crate) use self::closure::{CaptureKind, CapturedItem, CapturedItemWithoutTy};
pub(crate) mod unify;
mod path;
mod expr;
mod pat;
mod coerce;
pub(crate) mod closure;
mod mutability;
use cast::CastCheck;
pub(crate) use closure::{CaptureKind, CapturedItem, CapturedItemWithoutTy};
/// The entry point of type inference.
pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<InferenceResult> {
@ -508,6 +510,8 @@ pub(crate) struct InferenceContext<'a> {
diverges: Diverges,
breakables: Vec<BreakableContext>,
deferred_cast_checks: Vec<CastCheck>,
// fields related to closure capture
current_captures: Vec<CapturedItemWithoutTy>,
current_closure: Option<ClosureId>,
@ -582,7 +586,8 @@ impl<'a> InferenceContext<'a> {
resolver,
diverges: Diverges::Maybe,
breakables: Vec::new(),
current_captures: vec![],
deferred_cast_checks: Vec::new(),
current_captures: Vec::new(),
current_closure: None,
deferred_closures: FxHashMap::default(),
closure_dependencies: FxHashMap::default(),
@ -594,7 +599,7 @@ impl<'a> InferenceContext<'a> {
// used this function for another workaround, mention it here. If you really need this function and believe that
// there is no problem in it being `pub(crate)`, remove this comment.
pub(crate) fn resolve_all(self) -> InferenceResult {
let InferenceContext { mut table, mut result, .. } = self;
let InferenceContext { mut table, mut result, deferred_cast_checks, .. } = self;
// Destructure every single field so whenever new fields are added to `InferenceResult` we
// don't forget to handle them here.
let InferenceResult {
@ -622,6 +627,13 @@ impl<'a> InferenceContext<'a> {
table.fallback_if_possible();
// Comment from rustc:
// Even though coercion casts provide type hints, we check casts after fallback for
// backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
for cast in deferred_cast_checks {
cast.check(&mut table);
}
// FIXME resolve obligations as well (use Guidance if necessary)
table.resolve_obligations_as_possible();

View file

@ -0,0 +1,46 @@
//! Type cast logic. Basically coercion + additional casts.
use crate::{infer::unify::InferenceTable, Interner, Ty, TyExt, TyKind};
#[derive(Clone, Debug)]
pub(super) struct CastCheck {
expr_ty: Ty,
cast_ty: Ty,
}
impl CastCheck {
pub(super) fn new(expr_ty: Ty, cast_ty: Ty) -> Self {
Self { expr_ty, cast_ty }
}
pub(super) fn check(self, table: &mut InferenceTable<'_>) {
// FIXME: This function currently only implements the bits that influence the type
// inference. We should return the adjustments on success and report diagnostics on error.
let expr_ty = table.resolve_ty_shallow(&self.expr_ty);
let cast_ty = table.resolve_ty_shallow(&self.cast_ty);
if expr_ty.contains_unknown() || cast_ty.contains_unknown() {
return;
}
if table.coerce(&expr_ty, &cast_ty).is_ok() {
return;
}
if check_ref_to_ptr_cast(expr_ty, cast_ty, table) {
// Note that this type of cast is actually split into a coercion to a
// pointer type and a cast:
// &[T; N] -> *[T; N] -> *T
return;
}
// FIXME: Check other kinds of non-coercion casts and report error if any?
}
}
fn check_ref_to_ptr_cast(expr_ty: Ty, cast_ty: Ty, table: &mut InferenceTable<'_>) -> bool {
let Some((expr_inner_ty, _, _)) = expr_ty.as_reference() else { return false; };
let Some((cast_inner_ty, _)) = cast_ty.as_raw_ptr() else { return false; };
let TyKind::Array(expr_elt_ty, _) = expr_inner_ty.kind(Interner) else { return false; };
table.coerce(expr_elt_ty, cast_inner_ty).is_ok()
}
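For illustration, the cast that `check_ref_to_ptr_cast` accepts is the array-reference-to-element-pointer cast, which rustc treats as a coercion to a raw pointer to the array followed by a pointer cast (a sketch, assuming only plain Rust):

fn main() {
    let arr: [u8; 4] = [1, 2, 3, 4];
    // One-step form: &[u8; 4] -> *const u8.
    let p: *const u8 = &arr as *const u8;
    // Equivalent two-step form that makes the decomposition explicit:
    let q: *const [u8; 4] = &arr;
    let q: *const u8 = q as *const u8;
    assert_eq!(unsafe { (*p, *q) }, (1, 1));
}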

View file

@ -488,10 +488,6 @@ impl InferenceContext<'_> {
self.consume_expr(*tail);
}
}
Expr::While { condition, body, label: _ } => {
self.consume_expr(*condition);
self.consume_expr(*body);
}
Expr::Call { callee, args, is_assignee_expr: _ } => {
self.consume_expr(*callee);
self.consume_exprs(args.iter().copied());

View file

@ -46,8 +46,8 @@ use crate::{
};
use super::{
coerce::auto_deref_adjust_steps, find_breakable, BreakableContext, Diverges, Expectation,
InferenceContext, InferenceDiagnostic, TypeMismatch,
cast::CastCheck, coerce::auto_deref_adjust_steps, find_breakable, BreakableContext, Diverges,
Expectation, InferenceContext, InferenceDiagnostic, TypeMismatch,
};
impl InferenceContext<'_> {
@ -198,19 +198,6 @@ impl InferenceContext<'_> {
None => self.result.standard_types.never.clone(),
}
}
&Expr::While { condition, body, label } => {
self.with_breakable_ctx(BreakableKind::Loop, None, label, |this| {
this.infer_expr(
condition,
&Expectation::HasType(this.result.standard_types.bool_.clone()),
);
this.infer_expr(body, &Expectation::HasType(TyBuilder::unit()));
});
// the body may not run, so it diverging doesn't mean we diverge
self.diverges = Diverges::Maybe;
TyBuilder::unit()
}
Expr::Closure { body, args, ret_type, arg_types, closure_kind, capture_by: _ } => {
assert_eq!(args.len(), arg_types.len());
@ -574,16 +561,8 @@ impl InferenceContext<'_> {
}
Expr::Cast { expr, type_ref } => {
let cast_ty = self.make_ty(type_ref);
// FIXME: propagate the "castable to" expectation
let inner_ty = self.infer_expr_no_expect(*expr);
match (inner_ty.kind(Interner), cast_ty.kind(Interner)) {
(TyKind::Ref(_, _, inner), TyKind::Raw(_, cast)) => {
// FIXME: record invalid cast diagnostic in case of mismatch
self.unify(inner, cast);
}
// FIXME check the other kinds of cast...
_ => (),
}
let expr_ty = self.infer_expr(*expr, &Expectation::Castable(cast_ty.clone()));
self.deferred_cast_checks.push(CastCheck::new(expr_ty, cast_ty.clone()));
cast_ty
}
Expr::Ref { expr, rawness, mutability } => {
@ -1592,7 +1571,7 @@ impl InferenceContext<'_> {
output: Ty,
inputs: Vec<Ty>,
) -> Vec<Ty> {
if let Some(expected_ty) = expected_output.to_option(&mut self.table) {
if let Some(expected_ty) = expected_output.only_has_type(&mut self.table) {
self.table.fudge_inference(|table| {
if table.try_unify(&expected_ty, &output).is_ok() {
table.resolve_with_fallback(inputs, &|var, kind, _, _| match kind {

View file

@ -69,10 +69,6 @@ impl InferenceContext<'_> {
self.infer_mut_expr(*tail, Mutability::Not);
}
}
&Expr::While { condition: c, body, label: _ } => {
self.infer_mut_expr(c, Mutability::Not);
self.infer_mut_expr(body, Mutability::Not);
}
Expr::MethodCall { receiver: it, method_name: _, args, generic_args: _ }
| Expr::Call { callee: it, args, is_assignee_expr: _ } => {
self.infer_mut_not_expr_iter(args.iter().copied().chain(Some(*it)));

View file

@ -14,7 +14,7 @@ use triomphe::Arc;
use crate::{
consteval::try_const_usize, db::HirDatabase, infer::normalize, layout::adt::struct_variant_idx,
utils::ClosureSubst, Interner, Substitution, TraitEnvironment, Ty,
utils::ClosureSubst, Interner, ProjectionTy, Substitution, TraitEnvironment, Ty,
};
pub use self::{
@ -279,7 +279,15 @@ pub fn layout_of_ty_query(
// return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
// }
let unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone());
let mut unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone());
if let TyKind::AssociatedType(id, subst) = unsized_part.kind(Interner) {
unsized_part = TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy {
associated_ty_id: *id,
substitution: subst.clone(),
}))
.intern(Interner);
}
unsized_part = normalize(db, trait_env.clone(), unsized_part);
let metadata = match unsized_part.kind(Interner) {
TyKind::Slice(_) | TyKind::Str => {
scalar_unit(dl, Primitive::Int(dl.ptr_sized_integer(), false))
@ -362,8 +370,16 @@ pub fn layout_of_ty_query(
return Err(LayoutError::NotImplemented)
}
TyKind::Error => return Err(LayoutError::HasErrorType),
TyKind::AssociatedType(_, _)
| TyKind::Alias(_)
TyKind::AssociatedType(id, subst) => {
// Try again with `TyKind::Alias` to normalize the associated type.
let ty = TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy {
associated_ty_id: *id,
substitution: subst.clone(),
}))
.intern(Interner);
return db.layout_of_ty(ty, trait_env);
}
TyKind::Alias(_)
| TyKind::Placeholder(_)
| TyKind::BoundVar(_)
| TyKind::InferenceVar(_, _) => return Err(LayoutError::HasPlaceholder),

View file

@ -234,6 +234,7 @@ impl Place {
self.local == child.local && child.projection.starts_with(&self.projection)
}
/// The place itself is not included
fn iterate_over_parents(&self) -> impl Iterator<Item = Place> + '_ {
(0..self.projection.len())
.map(|x| &self.projection[0..x])

View file

@ -1,6 +1,13 @@
//! This module provides a MIR interpreter, which is used in const eval.
use std::{borrow::Cow, cell::RefCell, collections::HashMap, fmt::Write, iter, mem, ops::Range};
use std::{
borrow::Cow,
cell::RefCell,
collections::{HashMap, HashSet},
fmt::Write,
iter, mem,
ops::Range,
};
use base_db::{CrateId, FileId};
use chalk_ir::Mutability;
@ -39,7 +46,8 @@ use crate::{
use super::{
return_slot, AggregateKind, BasicBlockId, BinOp, CastKind, LocalId, MirBody, MirLowerError,
MirSpan, Operand, Place, ProjectionElem, Rvalue, StatementKind, TerminatorKind, UnOp,
MirSpan, Operand, Place, PlaceElem, ProjectionElem, Rvalue, StatementKind, TerminatorKind,
UnOp,
};
mod shim;
@ -68,18 +76,22 @@ pub struct VTableMap {
}
impl VTableMap {
const OFFSET: usize = 1000; // We should add some offset to ids to make 0 (null) an invalid id.
fn id(&mut self, ty: Ty) -> usize {
if let Some(it) = self.ty_to_id.get(&ty) {
return *it;
}
let id = self.id_to_ty.len();
let id = self.id_to_ty.len() + VTableMap::OFFSET;
self.id_to_ty.push(ty.clone());
self.ty_to_id.insert(ty, id);
id
}
pub(crate) fn ty(&self, id: usize) -> Result<&Ty> {
self.id_to_ty.get(id).ok_or(MirEvalError::InvalidVTableId(id))
id.checked_sub(VTableMap::OFFSET)
.and_then(|id| self.id_to_ty.get(id))
.ok_or(MirEvalError::InvalidVTableId(id))
}
fn ty_of_bytes(&self, bytes: &[u8]) -> Result<&Ty> {
@ -116,13 +128,18 @@ impl TlsData {
}
struct StackFrame {
body: Arc<MirBody>,
locals: Locals,
destination: Option<BasicBlockId>,
prev_stack_ptr: usize,
span: (MirSpan, DefWithBodyId),
}
#[derive(Clone)]
enum MirOrDynIndex {
Mir(Arc<MirBody>),
Dyn(usize),
}
pub struct Evaluator<'a> {
db: &'a dyn HirDatabase,
trait_env: Arc<TraitEnvironment>,
@ -141,6 +158,17 @@ pub struct Evaluator<'a> {
stdout: Vec<u8>,
stderr: Vec<u8>,
layout_cache: RefCell<FxHashMap<Ty, Arc<Layout>>>,
projected_ty_cache: RefCell<FxHashMap<(Ty, PlaceElem), Ty>>,
not_special_fn_cache: RefCell<FxHashSet<FunctionId>>,
mir_or_dyn_index_cache: RefCell<FxHashMap<(FunctionId, Substitution), MirOrDynIndex>>,
/// Constantly dropping and creating `Locals` is very costly. We store
/// old locals that we normally want to drop here, to reuse their allocations
/// later.
unused_locals_store: RefCell<FxHashMap<DefWithBodyId, Vec<Locals>>>,
cached_ptr_size: usize,
cached_fn_trait_func: Option<FunctionId>,
cached_fn_mut_trait_func: Option<FunctionId>,
cached_fn_once_trait_func: Option<FunctionId>,
crate_id: CrateId,
// FIXME: This is a workaround, see the comment on `interpret_mir`
assert_placeholder_ty_is_unused: bool,
@ -313,6 +341,7 @@ pub enum MirEvalError {
InvalidVTableId(usize),
CoerceUnsizedError(Ty),
LangItemNotFound(LangItem),
BrokenLayout(Layout),
}
impl MirEvalError {
@ -399,6 +428,7 @@ impl MirEvalError {
| MirEvalError::TargetDataLayoutNotAvailable
| MirEvalError::CoerceUnsizedError(_)
| MirEvalError::LangItemNotFound(_)
| MirEvalError::BrokenLayout(_)
| MirEvalError::InvalidVTableId(_) => writeln!(f, "{:?}", err)?,
}
Ok(())
@ -433,6 +463,7 @@ impl std::fmt::Debug for MirEvalError {
Self::CoerceUnsizedError(arg0) => {
f.debug_tuple("CoerceUnsizedError").field(arg0).finish()
}
Self::BrokenLayout(arg0) => f.debug_tuple("BrokenLayout").field(arg0).finish(),
Self::InvalidVTableId(arg0) => f.debug_tuple("InvalidVTableId").field(arg0).finish(),
Self::NotSupported(arg0) => f.debug_tuple("NotSupported").field(arg0).finish(),
Self::InvalidConst(arg0) => {
@ -464,8 +495,16 @@ impl DropFlags {
fn remove_place(&mut self, p: &Place) -> bool {
// FIXME: replace parents with parts
if let Some(parent) = p.iterate_over_parents().find(|it| self.need_drop.contains(it)) {
self.need_drop.remove(&parent);
return true;
}
self.need_drop.remove(p)
}
fn clear(&mut self) {
self.need_drop.clear();
}
}
#[derive(Debug)]
@ -508,6 +547,11 @@ pub fn interpret_mir(
)
}
#[cfg(test)]
const EXECUTION_LIMIT: usize = 100_000;
#[cfg(not(test))]
const EXECUTION_LIMIT: usize = 10_000_000;
impl Evaluator<'_> {
pub fn new<'a>(
db: &'a dyn HirDatabase,
@ -531,9 +575,29 @@ impl Evaluator<'_> {
stderr: vec![],
assert_placeholder_ty_is_unused,
stack_depth_limit: 100,
execution_limit: 1000_000,
execution_limit: EXECUTION_LIMIT,
memory_limit: 1000_000_000, // 2GB, 1GB for stack and 1GB for heap
layout_cache: RefCell::new(HashMap::default()),
projected_ty_cache: RefCell::new(HashMap::default()),
not_special_fn_cache: RefCell::new(HashSet::default()),
mir_or_dyn_index_cache: RefCell::new(HashMap::default()),
unused_locals_store: RefCell::new(HashMap::default()),
cached_ptr_size: match db.target_data_layout(crate_id) {
Some(it) => it.pointer_size.bytes_usize(),
None => 8,
},
cached_fn_trait_func: db
.lang_item(crate_id, LangItem::Fn)
.and_then(|x| x.as_trait())
.and_then(|x| db.trait_data(x).method_by_name(&name![call])),
cached_fn_mut_trait_func: db
.lang_item(crate_id, LangItem::FnMut)
.and_then(|x| x.as_trait())
.and_then(|x| db.trait_data(x).method_by_name(&name![call_mut])),
cached_fn_once_trait_func: db
.lang_item(crate_id, LangItem::FnOnce)
.and_then(|x| x.as_trait())
.and_then(|x| db.trait_data(x).method_by_name(&name![call_once])),
}
}
@ -554,10 +618,34 @@ impl Evaluator<'_> {
}
fn ptr_size(&self) -> usize {
match self.db.target_data_layout(self.crate_id) {
Some(it) => it.pointer_size.bytes_usize(),
None => 8,
self.cached_ptr_size
}
fn projected_ty(&self, ty: Ty, proj: PlaceElem) -> Ty {
let pair = (ty, proj);
if let Some(r) = self.projected_ty_cache.borrow().get(&pair) {
return r.clone();
}
let (ty, proj) = pair;
let r = proj.projected_ty(
ty.clone(),
self.db,
|c, subst, f| {
let (def, _) = self.db.lookup_intern_closure(c.into());
let infer = self.db.infer(def);
let (captures, _) = infer.closure_info(&c);
let parent_subst = ClosureSubst(subst).parent_subst();
captures
.get(f)
.expect("broken closure field")
.ty
.clone()
.substitute(Interner, parent_subst)
},
self.crate_id,
);
self.projected_ty_cache.borrow_mut().insert((ty, proj), r.clone());
r
}
fn place_addr_and_ty_and_metadata<'a>(
@ -570,23 +658,7 @@ impl Evaluator<'_> {
let mut metadata: Option<IntervalOrOwned> = None; // locals are always sized
for proj in &*p.projection {
let prev_ty = ty.clone();
ty = proj.projected_ty(
ty,
self.db,
|c, subst, f| {
let (def, _) = self.db.lookup_intern_closure(c.into());
let infer = self.db.infer(def);
let (captures, _) = infer.closure_info(&c);
let parent_subst = ClosureSubst(subst).parent_subst();
captures
.get(f)
.expect("broken closure field")
.ty
.clone()
.substitute(Interner, parent_subst)
},
self.crate_id,
);
ty = self.projected_ty(ty, proj.clone());
match proj {
ProjectionElem::Deref => {
metadata = if self.size_align_of(&ty, locals)?.is_none() {
@ -680,8 +752,10 @@ impl Evaluator<'_> {
.offset(u32::from(f.local_id.into_raw()) as usize)
.bytes_usize();
addr = addr.offset(offset);
// FIXME: support structs with unsized fields
metadata = None;
// Unsized field metadata is equal to the metadata of the struct
if self.size_align_of(&ty, locals)?.is_some() {
metadata = None;
}
}
ProjectionElem::OpaqueCast(_) => not_supported!("opaque cast"),
}
@ -702,9 +776,7 @@ impl Evaluator<'_> {
}
fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Arc<Layout>> {
self.db.layout_of_adt(adt, subst.clone(), self.trait_env.clone()).map_err(|e| {
MirEvalError::LayoutError(e, TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner))
})
self.layout(&TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner))
}
fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals) -> Result<Ty> {
@ -740,18 +812,18 @@ impl Evaluator<'_> {
return Err(MirEvalError::StackOverflow);
}
let mut current_block_idx = body.start_block;
let (mut locals, prev_stack_ptr) = self.create_locals_for_body(body.clone(), None)?;
let (mut locals, prev_stack_ptr) = self.create_locals_for_body(&body, None)?;
self.fill_locals_for_body(&body, &mut locals, args)?;
let prev_code_stack = mem::take(&mut self.code_stack);
let span = (MirSpan::Unknown, body.owner);
self.code_stack.push(StackFrame { body, locals, destination: None, prev_stack_ptr, span });
self.code_stack.push(StackFrame { locals, destination: None, prev_stack_ptr, span });
'stack: loop {
let Some(mut my_stack_frame) = self.code_stack.pop() else {
not_supported!("missing stack frame");
};
let e = (|| {
let mut locals = &mut my_stack_frame.locals;
let body = &*my_stack_frame.body;
let body = locals.body.clone();
loop {
let current_block = &body.basic_blocks[current_block_idx];
if let Some(it) = self.execution_limit.checked_sub(1) {
@ -820,7 +892,7 @@ impl Evaluator<'_> {
locals.drop_flags.add_place(destination.clone());
if let Some(stack_frame) = stack_frame {
self.code_stack.push(my_stack_frame);
current_block_idx = stack_frame.body.start_block;
current_block_idx = stack_frame.locals.body.start_block;
self.code_stack.push(stack_frame);
return Ok(None);
} else {
@ -861,18 +933,24 @@ impl Evaluator<'_> {
let my_code_stack = mem::replace(&mut self.code_stack, prev_code_stack);
let mut error_stack = vec![];
for frame in my_code_stack.into_iter().rev() {
if let DefWithBodyId::FunctionId(f) = frame.body.owner {
if let DefWithBodyId::FunctionId(f) = frame.locals.body.owner {
error_stack.push((Either::Left(f), frame.span.0, frame.span.1));
}
}
return Err(MirEvalError::InFunction(Box::new(e), error_stack));
}
};
let return_interval = my_stack_frame.locals.ptr[return_slot()];
self.unused_locals_store
.borrow_mut()
.entry(my_stack_frame.locals.body.owner)
.or_default()
.push(my_stack_frame.locals);
match my_stack_frame.destination {
None => {
self.code_stack = prev_code_stack;
self.stack_depth_limit += 1;
return Ok(my_stack_frame.locals.ptr[return_slot()].get(self)?.to_vec());
return Ok(return_interval.get(self)?.to_vec());
}
Some(bb) => {
// We don't support const promotion, so we can't truncate the stack yet.
@ -910,39 +988,45 @@ impl Evaluator<'_> {
fn create_locals_for_body(
&mut self,
body: Arc<MirBody>,
body: &Arc<MirBody>,
destination: Option<Interval>,
) -> Result<(Locals, usize)> {
let mut locals =
Locals { ptr: ArenaMap::new(), body: body.clone(), drop_flags: DropFlags::default() };
let (locals_ptr, stack_size) = {
match self.unused_locals_store.borrow_mut().entry(body.owner).or_default().pop() {
None => Locals {
ptr: ArenaMap::new(),
body: body.clone(),
drop_flags: DropFlags::default(),
},
Some(mut l) => {
l.drop_flags.clear();
l.body = body.clone();
l
}
};
let stack_size = {
let mut stack_ptr = self.stack.len();
let addr = body
.locals
.iter()
.map(|(id, it)| {
if id == return_slot() {
if let Some(destination) = destination {
return Ok((id, destination));
}
for (id, it) in body.locals.iter() {
if id == return_slot() {
if let Some(destination) = destination {
locals.ptr.insert(id, destination);
continue;
}
let (size, align) = self.size_align_of_sized(
&it.ty,
&locals,
"no unsized local in extending stack",
)?;
while stack_ptr % align != 0 {
stack_ptr += 1;
}
let my_ptr = stack_ptr;
stack_ptr += size;
Ok((id, Interval { addr: Stack(my_ptr), size }))
})
.collect::<Result<ArenaMap<LocalId, _>>>()?;
let stack_size = stack_ptr - self.stack.len();
(addr, stack_size)
}
let (size, align) = self.size_align_of_sized(
&it.ty,
&locals,
"no unsized local in extending stack",
)?;
while stack_ptr % align != 0 {
stack_ptr += 1;
}
let my_ptr = stack_ptr;
stack_ptr += size;
locals.ptr.insert(id, Interval { addr: Stack(my_ptr), size });
}
stack_ptr - self.stack.len()
};
locals.ptr = locals_ptr;
let prev_stack_pointer = self.stack.len();
if stack_size > self.memory_limit {
return Err(MirEvalError::Panic(format!(
@ -1543,12 +1627,18 @@ impl Evaluator<'_> {
) -> Result<Vec<u8>> {
let mut result = vec![0; size];
if let Some((offset, size, value)) = tag {
result[offset..offset + size].copy_from_slice(&value.to_le_bytes()[0..size]);
match result.get_mut(offset..offset + size) {
Some(it) => it.copy_from_slice(&value.to_le_bytes()[0..size]),
None => return Err(MirEvalError::BrokenLayout(variant_layout.clone())),
}
}
for (i, op) in values.enumerate() {
let offset = variant_layout.fields.offset(i).bytes_usize();
let op = op.get(&self)?;
result[offset..offset + op.len()].copy_from_slice(op);
match result.get_mut(offset..offset + op.len()) {
Some(it) => it.copy_from_slice(op),
None => return Err(MirEvalError::BrokenLayout(variant_layout.clone())),
}
}
Ok(result)
}
@ -1671,6 +1761,11 @@ impl Evaluator<'_> {
}
fn size_align_of(&self, ty: &Ty, locals: &Locals) -> Result<Option<(usize, usize)>> {
if let Some(layout) = self.layout_cache.borrow().get(ty) {
return Ok(layout
.is_sized()
.then(|| (layout.size.bytes_usize(), layout.align.abi.bytes() as usize)));
}
if let DefWithBodyId::VariantId(f) = locals.body.owner {
if let Some((adt, _)) = ty.as_adt() {
if AdtId::from(f.parent) == adt {
@ -1731,16 +1826,15 @@ impl Evaluator<'_> {
}
fn detect_fn_trait(&self, def: FunctionId) -> Option<FnTrait> {
use LangItem::*;
let ItemContainerId::TraitId(parent) = self.db.lookup_intern_function(def).container else {
return None;
};
let l = self.db.lang_attr(parent.into())?;
match l {
FnOnce => Some(FnTrait::FnOnce),
FnMut => Some(FnTrait::FnMut),
Fn => Some(FnTrait::Fn),
_ => None,
let def = Some(def);
if def == self.cached_fn_trait_func {
Some(FnTrait::Fn)
} else if def == self.cached_fn_mut_trait_func {
Some(FnTrait::FnMut)
} else if def == self.cached_fn_once_trait_func {
Some(FnTrait::FnOnce)
} else {
None
}
}
@ -1796,6 +1890,17 @@ impl Evaluator<'_> {
}
}
}
chalk_ir::TyKind::Array(inner, len) => {
let len = match try_const_usize(this.db, &len) {
Some(it) => it as usize,
None => not_supported!("non evaluatable array len in patching addresses"),
};
let size = this.size_of_sized(inner, locals, "inner of array")?;
for i in 0..len {
let offset = i * size;
rec(this, &bytes[offset..offset + size], inner, locals, mm)?;
}
}
chalk_ir::TyKind::Tuple(_, subst) => {
let layout = this.layout(ty)?;
for (id, ty) in subst.iter(Interner).enumerate() {
@ -1904,10 +2009,31 @@ impl Evaluator<'_> {
AdtId::UnionId(_) => (),
AdtId::EnumId(_) => (),
},
TyKind::Tuple(_, subst) => {
for (id, ty) in subst.iter(Interner).enumerate() {
let ty = ty.assert_ty_ref(Interner); // Tuple only has type argument
let offset = layout.fields.offset(id).bytes_usize();
self.patch_addresses(patch_map, old_vtable, addr.offset(offset), ty, locals)?;
}
}
TyKind::Array(inner, len) => {
let len = match try_const_usize(self.db, &len) {
Some(it) => it as usize,
None => not_supported!("non evaluatable array len in patching addresses"),
};
let size = self.size_of_sized(inner, locals, "inner of array")?;
for i in 0..len {
self.patch_addresses(
patch_map,
old_vtable,
addr.offset(i * size),
inner,
locals,
)?;
}
}
TyKind::AssociatedType(_, _)
| TyKind::Scalar(_)
| TyKind::Tuple(_, _)
| TyKind::Array(_, _)
| TyKind::Slice(_)
| TyKind::Raw(_, _)
| TyKind::OpaqueType(_, _)
@ -2051,6 +2177,40 @@ impl Evaluator<'_> {
}
}
fn get_mir_or_dyn_index(
&self,
def: FunctionId,
generic_args: Substitution,
locals: &Locals,
span: MirSpan,
) -> Result<MirOrDynIndex> {
let pair = (def, generic_args);
if let Some(r) = self.mir_or_dyn_index_cache.borrow().get(&pair) {
return Ok(r.clone());
}
let (def, generic_args) = pair;
let r = if let Some(self_ty_idx) =
is_dyn_method(self.db, self.trait_env.clone(), def, generic_args.clone())
{
MirOrDynIndex::Dyn(self_ty_idx)
} else {
let (imp, generic_args) =
self.db.lookup_impl_method(self.trait_env.clone(), def, generic_args.clone());
let mir_body = self
.db
.monomorphized_mir_body(imp.into(), generic_args, self.trait_env.clone())
.map_err(|e| {
MirEvalError::InFunction(
Box::new(MirEvalError::MirLowerError(imp, e)),
vec![(Either::Left(imp), span, locals.body.owner)],
)
})?;
MirOrDynIndex::Mir(mir_body)
};
self.mir_or_dyn_index_cache.borrow_mut().insert((def, generic_args), r.clone());
Ok(r)
}
fn exec_fn_with_args(
&mut self,
def: FunctionId,
@ -2072,93 +2232,76 @@ impl Evaluator<'_> {
return Ok(None);
}
let arg_bytes = args.iter().map(|it| IntervalOrOwned::Borrowed(it.interval));
if let Some(self_ty_idx) =
is_dyn_method(self.db, self.trait_env.clone(), def, generic_args.clone())
{
// In the layout of the current possible receiver, which at the moment of writing this code is one of
// `&T`, `&mut T`, `Box<T>`, `Rc<T>`, `Arc<T>`, and `Pin<P>` where `P` is one of the possible receivers,
// the vtable is exactly in the `[ptr_size..2*ptr_size]` bytes. So we can use it without branching on
// the type.
let first_arg = arg_bytes.clone().next().unwrap();
let first_arg = first_arg.get(self)?;
let ty =
self.vtable_map.ty_of_bytes(&first_arg[self.ptr_size()..self.ptr_size() * 2])?;
let mut args_for_target = args.to_vec();
args_for_target[0] = IntervalAndTy {
interval: args_for_target[0].interval.slice(0..self.ptr_size()),
ty: ty.clone(),
};
let ty = GenericArgData::Ty(ty.clone()).intern(Interner);
let generics_for_target = Substitution::from_iter(
Interner,
generic_args.iter(Interner).enumerate().map(|(i, it)| {
if i == self_ty_idx {
&ty
} else {
it
}
}),
);
return self.exec_fn_with_args(
def,
&args_for_target,
generics_for_target,
match self.get_mir_or_dyn_index(def, generic_args.clone(), locals, span)? {
MirOrDynIndex::Dyn(self_ty_idx) => {
// In the layout of the current possible receiver, which at the moment of writing this code is one of
// `&T`, `&mut T`, `Box<T>`, `Rc<T>`, `Arc<T>`, and `Pin<P>` where `P` is one of the possible receivers,
// the vtable is exactly in the `[ptr_size..2*ptr_size]` bytes. So we can use it without branching on
// the type.
let first_arg = arg_bytes.clone().next().unwrap();
let first_arg = first_arg.get(self)?;
let ty = self
.vtable_map
.ty_of_bytes(&first_arg[self.ptr_size()..self.ptr_size() * 2])?;
let mut args_for_target = args.to_vec();
args_for_target[0] = IntervalAndTy {
interval: args_for_target[0].interval.slice(0..self.ptr_size()),
ty: ty.clone(),
};
let ty = GenericArgData::Ty(ty.clone()).intern(Interner);
let generics_for_target = Substitution::from_iter(
Interner,
generic_args.iter(Interner).enumerate().map(|(i, it)| {
if i == self_ty_idx {
&ty
} else {
it
}
}),
);
return self.exec_fn_with_args(
def,
&args_for_target,
generics_for_target,
locals,
destination,
target_bb,
span,
);
}
MirOrDynIndex::Mir(body) => self.exec_looked_up_function(
body,
locals,
def,
arg_bytes,
span,
destination,
target_bb,
span,
);
),
}
let (imp, generic_args) =
self.db.lookup_impl_method(self.trait_env.clone(), def, generic_args);
self.exec_looked_up_function(
generic_args,
locals,
imp,
arg_bytes,
span,
destination,
target_bb,
)
}
fn exec_looked_up_function(
&mut self,
generic_args: Substitution,
mir_body: Arc<MirBody>,
locals: &Locals,
imp: FunctionId,
def: FunctionId,
arg_bytes: impl Iterator<Item = IntervalOrOwned>,
span: MirSpan,
destination: Interval,
target_bb: Option<BasicBlockId>,
) -> Result<Option<StackFrame>> {
let def = imp.into();
let mir_body = self
.db
.monomorphized_mir_body(def, generic_args, self.trait_env.clone())
.map_err(|e| {
MirEvalError::InFunction(
Box::new(MirEvalError::MirLowerError(imp, e)),
vec![(Either::Left(imp), span, locals.body.owner)],
)
})?;
Ok(if let Some(target_bb) = target_bb {
let (mut locals, prev_stack_ptr) =
self.create_locals_for_body(mir_body.clone(), Some(destination))?;
self.create_locals_for_body(&mir_body, Some(destination))?;
self.fill_locals_for_body(&mir_body, &mut locals, arg_bytes.into_iter())?;
let span = (span, locals.body.owner);
Some(StackFrame {
body: mir_body,
locals,
destination: Some(target_bb),
prev_stack_ptr,
span,
})
Some(StackFrame { locals, destination: Some(target_bb), prev_stack_ptr, span })
} else {
let result = self.interpret_mir(mir_body, arg_bytes).map_err(|e| {
MirEvalError::InFunction(
Box::new(e),
vec![(Either::Left(imp), span, locals.body.owner)],
vec![(Either::Left(def), span, locals.body.owner)],
)
})?;
destination.write_from_bytes(self, &result)?;
@ -2330,16 +2473,15 @@ impl Evaluator<'_> {
// we can ignore drop in them.
return Ok(());
};
let (impl_drop_candidate, subst) = self.db.lookup_impl_method(
self.trait_env.clone(),
drop_fn,
Substitution::from1(Interner, ty.clone()),
);
if impl_drop_candidate != drop_fn {
let generic_args = Substitution::from1(Interner, ty.clone());
if let Ok(MirOrDynIndex::Mir(body)) =
self.get_mir_or_dyn_index(drop_fn, generic_args, locals, span)
{
self.exec_looked_up_function(
subst,
body,
locals,
impl_drop_candidate,
drop_fn,
[IntervalOrOwned::Owned(addr.to_bytes())].into_iter(),
span,
Interval { addr: Address::Invalid(0), size: 0 },

View file

@ -36,6 +36,9 @@ impl Evaluator<'_> {
destination: Interval,
span: MirSpan,
) -> Result<bool> {
if self.not_special_fn_cache.borrow().contains(&def) {
return Ok(false);
}
let function_data = self.db.function_data(def);
let is_intrinsic = match &function_data.abi {
Some(abi) => *abi == Interned::new_str("rust-intrinsic"),
@ -124,9 +127,88 @@ impl Evaluator<'_> {
destination.write_from_bytes(self, &result)?;
return Ok(true);
}
if let ItemContainerId::TraitId(t) = def.lookup(self.db.upcast()).container {
if self.db.lang_attr(t.into()) == Some(LangItem::Clone) {
let [self_ty] = generic_args.as_slice(Interner) else {
not_supported!("wrong generic arg count for clone");
};
let Some(self_ty) = self_ty.ty(Interner) else {
not_supported!("wrong generic arg kind for clone");
};
// Clone has special impls for tuples and function pointers
if matches!(self_ty.kind(Interner), TyKind::Function(_) | TyKind::Tuple(..)) {
self.exec_clone(def, args, self_ty.clone(), locals, destination, span)?;
return Ok(true);
}
// Return early to prevent caching clone as non special fn.
return Ok(false);
}
}
self.not_special_fn_cache.borrow_mut().insert(def);
Ok(false)
}
/// Clone has special impls for tuples and function pointers
fn exec_clone(
&mut self,
def: FunctionId,
args: &[IntervalAndTy],
self_ty: Ty,
locals: &Locals,
destination: Interval,
span: MirSpan,
) -> Result<()> {
match self_ty.kind(Interner) {
TyKind::Function(_) => {
let [arg] = args else {
not_supported!("wrong arg count for clone");
};
let addr = Address::from_bytes(arg.get(self)?)?;
return destination
.write_from_interval(self, Interval { addr, size: destination.size });
}
TyKind::Tuple(_, subst) => {
let [arg] = args else {
not_supported!("wrong arg count for clone");
};
let addr = Address::from_bytes(arg.get(self)?)?;
let layout = self.layout(&self_ty)?;
for (i, ty) in subst.iter(Interner).enumerate() {
let ty = ty.assert_ty_ref(Interner);
let size = self.layout(ty)?.size.bytes_usize();
let tmp = self.heap_allocate(self.ptr_size(), self.ptr_size())?;
let arg = IntervalAndTy {
interval: Interval { addr: tmp, size: self.ptr_size() },
ty: TyKind::Ref(Mutability::Not, static_lifetime(), ty.clone())
.intern(Interner),
};
let offset = layout.fields.offset(i).bytes_usize();
self.write_memory(tmp, &addr.offset(offset).to_bytes())?;
self.exec_clone(
def,
&[arg],
ty.clone(),
locals,
destination.slice(offset..offset + size),
span,
)?;
}
}
_ => {
self.exec_fn_with_args(
def,
args,
Substitution::from1(Interner, self_ty),
locals,
destination,
None,
span,
)?;
}
}
Ok(())
}
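For context, these two cases cover `Clone` impls that the compiler provides as built-ins, so there is no ordinary impl body to look up: a function pointer clones by copying the pointer value, and a tuple clones field by field, recursing through each field's own `Clone`. In plain Rust terms (illustrative only):

fn main() {
    let f: fn(i32) -> i32 = |x| x + 2;
    let g = f.clone();                       // just a copy of the pointer value
    assert_eq!(f as usize, g as usize);

    let t = (1u8, (String::from("x"), 3i64));
    let u = t.clone();                       // each field cloned in turn
    assert_eq!(t, u);
}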
fn exec_alloc_fn(
&mut self,
alloc_fn: &str,
@ -618,12 +700,15 @@ impl Evaluator<'_> {
else {
return Err(MirEvalError::TypeError("type_name generic arg is not provided"));
};
let Ok(ty_name) = ty.display_source_code(
let ty_name = match ty.display_source_code(
self.db,
locals.body.owner.module(self.db.upcast()),
true,
) else {
not_supported!("fail in generating type_name using source code display");
) {
Ok(ty_name) => ty_name,
// Fallback to human readable display in case of `Err`. Ideally we want to use `display_source_code` to
// render full paths.
Err(_) => ty.display(self.db).to_string(),
};
let len = ty_name.len();
let addr = self.heap_allocate(len, 1)?;
@ -679,7 +764,22 @@ impl Evaluator<'_> {
let ans = lhs.wrapping_add(rhs);
destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
}
"wrapping_sub" | "unchecked_sub" | "ptr_offset_from_unsigned" | "ptr_offset_from" => {
"ptr_offset_from_unsigned" | "ptr_offset_from" => {
let [lhs, rhs] = args else {
return Err(MirEvalError::TypeError("ptr_offset_from args are not provided"));
};
let lhs = i128::from_le_bytes(pad16(lhs.get(self)?, false));
let rhs = i128::from_le_bytes(pad16(rhs.get(self)?, false));
let ans = lhs.wrapping_sub(rhs);
let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner))
else {
return Err(MirEvalError::TypeError("ptr_offset_from generic arg is not provided"));
};
let size = self.size_of_sized(ty, locals, "ptr_offset_from arg")? as i128;
let ans = ans / size;
destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
}
"wrapping_sub" | "unchecked_sub" => {
let [lhs, rhs] = args else {
return Err(MirEvalError::TypeError("wrapping_sub args are not provided"));
};
@ -1057,7 +1157,14 @@ impl Evaluator<'_> {
_span: MirSpan,
) -> Result<()> {
// We are a single threaded runtime with no UB checking and no optimization, so
// we can implement these as normal functions.
// we can implement atomic intrinsics as normal functions.
if name.starts_with("singlethreadfence_") || name.starts_with("fence_") {
return Ok(());
}
// The rest of atomic intrinsics have exactly one generic arg
let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) else {
return Err(MirEvalError::TypeError("atomic intrinsic generic arg is not provided"));
};
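Treating fences as no-ops is sound here because a single-threaded interpreter has nothing to synchronize with; the handled intrinsics correspond to the stable atomics API roughly as below (sketch, plain `std` only):

use std::sync::atomic::{fence, AtomicI32, Ordering};

fn main() {
    let x = AtomicI32::new(5);
    fence(Ordering::SeqCst);                     // lowers to `atomic_fence_*`: no effect single-threaded
    let old = x.fetch_add(20, Ordering::SeqCst); // behaves like a plain read-modify-write
    assert_eq!((old, x.load(Ordering::SeqCst)), (5, 25));
}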

View file

@ -182,6 +182,50 @@ fn main() {
);
}
#[test]
fn drop_struct_field() {
check_pass(
r#"
//- minicore: drop, add, option, cell, builtin_impls
use core::cell::Cell;
fn should_not_reach() {
_ // FIXME: replace this function with panic when that works
}
struct X<'a>(&'a Cell<i32>);
impl<'a> Drop for X<'a> {
fn drop(&mut self) {
self.0.set(self.0.get() + 1)
}
}
struct Tuple<'a>(X<'a>, X<'a>, X<'a>);
fn main() {
let s = Cell::new(0);
{
let x0 = X(&s);
let xt = Tuple(x0, X(&s), X(&s));
let x1 = xt.1;
if s.get() != 0 {
should_not_reach();
}
drop(xt.0);
if s.get() != 1 {
should_not_reach();
}
}
// FIXME: this should be 3
if s.get() != 2 {
should_not_reach();
}
}
"#,
);
}
#[test]
fn drop_in_place() {
check_pass(
@ -613,6 +657,50 @@ fn main() {
);
}
#[test]
fn self_with_capital_s() {
check_pass(
r#"
//- minicore: fn, add, copy
struct S1;
impl S1 {
fn f() {
Self;
}
}
struct S2 {
f1: i32,
}
impl S2 {
fn f() {
Self { f1: 5 };
}
}
struct S3(i32);
impl S3 {
fn f() {
Self(2);
Self;
let this = Self;
this(2);
}
}
fn main() {
S1::f();
S2::f();
S3::f();
}
"#,
);
}
#[test]
fn syscalls() {
check_pass(

View file

@ -486,13 +486,10 @@ impl<'ctx> MirLowerCtx<'ctx> {
);
Ok(Some(current))
}
ValueNs::FunctionId(_) | ValueNs::StructId(_) => {
ValueNs::FunctionId(_) | ValueNs::StructId(_) | ValueNs::ImplSelf(_) => {
// It's probably a unit struct or a zero sized function, so no action is needed.
Ok(Some(current))
}
it => {
not_supported!("unknown name {it:?} in value name space");
}
}
}
Expr::If { condition, then_branch, else_branch } => {
@ -585,36 +582,6 @@ impl<'ctx> MirLowerCtx<'ctx> {
Ok(())
})
}
Expr::While { condition, body, label } => {
self.lower_loop(current, place, *label, expr_id.into(), |this, begin| {
let scope = this.push_drop_scope();
let Some((discr, to_switch)) =
this.lower_expr_to_some_operand(*condition, begin)?
else {
return Ok(());
};
let fail_cond = this.new_basic_block();
let after_cond = this.new_basic_block();
this.set_terminator(
to_switch,
TerminatorKind::SwitchInt {
discr,
targets: SwitchTargets::static_if(1, after_cond, fail_cond),
},
expr_id.into(),
);
let fail_cond = this.drop_until_scope(this.drop_scopes.len() - 1, fail_cond);
let end = this.current_loop_end()?;
this.set_goto(fail_cond, end, expr_id.into());
if let Some((_, block)) = this.lower_expr_as_place(after_cond, *body, true)? {
let block = scope.pop_and_drop(this, block);
this.set_goto(block, begin, expr_id.into());
} else {
scope.pop_assume_dropped(this);
}
Ok(())
})
}
Expr::Call { callee, args, .. } => {
if let Some((func_id, generic_args)) = self.infer.method_resolution(expr_id) {
let ty = chalk_ir::TyKind::FnDef(
@ -660,6 +627,11 @@ impl<'ctx> MirLowerCtx<'ctx> {
expr_id.into(),
)
}
TyKind::Closure(_, _) => {
not_supported!(
"method resolution not emitted for closure (Are Fn traits available?)"
);
}
TyKind::Error => {
return Err(MirLowerError::MissingFunctionDefinition(self.owner, expr_id))
}
@ -1026,18 +998,7 @@ impl<'ctx> MirLowerCtx<'ctx> {
self.push_assignment(current, lhs_place, r_value, expr_id.into());
return Ok(Some(current));
} else {
let Some((lhs_place, current)) =
self.lower_expr_as_place(current, *lhs, false)?
else {
return Ok(None);
};
let Some((rhs_op, current)) =
self.lower_expr_to_some_operand(*rhs, current)?
else {
return Ok(None);
};
self.push_assignment(current, lhs_place, rhs_op.into(), expr_id.into());
return Ok(Some(current));
return self.lower_assignment(current, *lhs, *rhs, expr_id.into());
}
}
let Some((lhs_op, current)) = self.lower_expr_to_some_operand(*lhs, current)?
@ -1283,6 +1244,30 @@ impl<'ctx> MirLowerCtx<'ctx> {
}
}
fn lower_assignment(
&mut self,
current: BasicBlockId,
lhs: ExprId,
rhs: ExprId,
span: MirSpan,
) -> Result<Option<BasicBlockId>> {
let Some((rhs_op, current)) =
self.lower_expr_to_some_operand(rhs, current)?
else {
return Ok(None);
};
if matches!(&self.body.exprs[lhs], Expr::Underscore) {
return Ok(Some(current));
}
let Some((lhs_place, current)) =
self.lower_expr_as_place(current, lhs, false)?
else {
return Ok(None);
};
self.push_assignment(current, lhs_place, rhs_op.into(), span);
Ok(Some(current))
}
fn placeholder_subst(&mut self) -> Substitution {
let placeholder_subst = match self.owner.as_generic_def_id() {
Some(it) => TyBuilder::placeholder_subst(self.db, it),

View file

@ -227,3 +227,22 @@ fn f(a: impl Foo<i8, Assoc<i16> = i32>) {
"#,
);
}
#[test]
fn fn_def_is_shown_as_fn_ptr() {
check_types_source_code(
r#"
fn foo(_: i32) -> i64 { 42 }
struct S<T>(T);
enum E { A(usize) }
fn test() {
let f = foo;
//^ fn(i32) -> i64
let f = S::<i8>;
//^ fn(i8) -> S<i8>
let f = E::A;
//^ fn(usize) -> E
}
"#,
);
}
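The distinction behind this test: each function item has its own zero-sized `FnDef` type with no surface syntax, and it merely coerces to a `fn` pointer; since the source-code renderer cannot name the `FnDef` type, it falls back to printing the pointer signature (see the `hir_fmt` change above). A plain-Rust illustration of the difference:

fn foo(_: i32) -> i64 { 42 }

fn main() {
    let f = foo;                  // unique zero-sized "fn item" type
    let g: fn(i32) -> i64 = foo;  // coerced to an ordinary function pointer
    assert_eq!(std::mem::size_of_val(&f), 0);
    assert_eq!(std::mem::size_of_val(&g), std::mem::size_of::<usize>());
    assert_eq!(f(1), g(1));
}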

View file

@ -209,6 +209,8 @@ fn expr_macro_def_expanded_in_various_places() {
104..105 '_': IntoIterator::Item<isize>
117..119 '{}': ()
124..134 '|| spam!()': impl Fn() -> isize
140..156 'while ...!() {}': !
140..156 'while ...!() {}': ()
140..156 'while ...!() {}': ()
154..156 '{}': ()
161..174 'break spam!()': !
@ -300,6 +302,8 @@ fn expr_macro_rules_expanded_in_various_places() {
118..119 '_': IntoIterator::Item<isize>
131..133 '{}': ()
138..148 '|| spam!()': impl Fn() -> isize
154..170 'while ...!() {}': !
154..170 'while ...!() {}': ()
154..170 'while ...!() {}': ()
168..170 '{}': ()
175..188 'break spam!()': !

View file

@ -412,17 +412,23 @@ fn diverging_expression_3_break() {
355..654 '{ ...; }; }': ()
398..399 'x': u32
407..433 '{ whil...; }; }': u32
409..430 'while ...eak; }': !
409..430 'while ...eak; }': ()
409..430 'while ...eak; }': ()
415..419 'true': bool
420..430 '{ break; }': ()
422..427 'break': !
537..538 'x': u32
546..564 '{ whil... {}; }': u32
548..561 'while true {}': !
548..561 'while true {}': ()
548..561 'while true {}': ()
554..558 'true': bool
559..561 '{}': ()
615..616 'x': u32
624..651 '{ whil...; }; }': u32
626..648 'while ...urn; }': !
626..648 'while ...urn; }': ()
626..648 'while ...urn; }': ()
632..636 'true': bool
637..648 '{ return; }': ()

View file

@ -1267,6 +1267,8 @@ fn test() {
"#,
expect![[r#"
10..59 '{ ... } }': ()
16..57 'while ... }': !
16..57 'while ... }': ()
16..57 'while ... }': ()
22..30 '{ true }': bool
24..28 'true': bool
@ -1978,3 +1980,23 @@ fn x(a: [i32; 4]) {
"#,
);
}
#[test]
fn dont_unify_on_casts() {
// #15246
check_types(
r#"
fn unify(_: [bool; 1]) {}
fn casted(_: *const bool) {}
fn default<T>() -> T { loop {} }
fn test() {
let foo = default();
//^^^ [bool; 1]
casted(&foo as *const _);
unify(foo);
}
"#,
);
}

View file

@ -3513,7 +3513,6 @@ fn func() {
);
}
// FIXME
#[test]
fn castable_to() {
check_infer(
@ -3538,10 +3537,10 @@ fn func() {
120..122 '{}': ()
138..184 '{ ...0]>; }': ()
148..149 'x': Box<[i32; 0]>
152..160 'Box::new': fn new<[{unknown}; 0]>([{unknown}; 0]) -> Box<[{unknown}; 0]>
152..164 'Box::new([])': Box<[{unknown}; 0]>
152..160 'Box::new': fn new<[i32; 0]>([i32; 0]) -> Box<[i32; 0]>
152..164 'Box::new([])': Box<[i32; 0]>
152..181 'Box::n...2; 0]>': Box<[i32; 0]>
161..163 '[]': [{unknown}; 0]
161..163 '[]': [i32; 0]
"#]],
);
}
@ -3577,6 +3576,21 @@ fn f<T>(t: Ark<T>) {
);
}
#[test]
fn ref_to_array_to_ptr_cast() {
check_types(
r#"
fn default<T>() -> T { loop {} }
fn foo() {
let arr = [default()];
//^^^ [i32; 1]
let ref_to_arr = &arr;
let casted = ref_to_arr as *const i32;
}
"#,
);
}
#[test]
fn const_dependent_on_local() {
check_types(