diff --git a/compiler/gen_dev/src/generic64/mod.rs b/compiler/gen_dev/src/generic64/mod.rs
index e0c53d9c0b..1bc07d0954 100644
--- a/compiler/gen_dev/src/generic64/mod.rs
+++ b/compiler/gen_dev/src/generic64/mod.rs
@@ -558,17 +558,18 @@ impl<
             .load_to_general_reg(&mut self.buf, cond_symbol);

         let mut base_storage = self.storage_manager.clone();
+        let mut max_branch_stack_size = 0;
         let mut ret_jumps = bumpalo::vec![in self.env.arena];
         let mut tmp = bumpalo::vec![in self.env.arena];
         for (val, _branch_info, stmt) in branches.iter() {
-            // TODO: look inot branch info and if it matters here.
+            // TODO: look into branch info and whether it matters here.
             tmp.clear();
-            // Create jump to next branch if not cond_sym not equal to value.
+            // Create jump to next branch if cond_sym is not equal to value.
             // Since we don't know the offset yet, set it to 0 and overwrite later.
             let jne_location = self.buf.len();
             let start_offset = ASM::jne_reg64_imm64_imm32(&mut self.buf, cond_reg, *val, 0);

-            // Build all statements in this branch.
+            // Build all statements in this branch, using the storage state from before any branch.
             self.storage_manager = base_storage.clone();
             self.build_stmt(stmt, ret_layout);

@@ -586,10 +587,14 @@ impl<
                 self.buf[jne_location + i] = *byte;
             }

-            base_storage.update_stack_size(self.storage_manager.stack_size());
+            // Track the maximum stack size across branches to avoid overwrites.
+            max_branch_stack_size =
+                std::cmp::max(max_branch_stack_size, self.storage_manager.stack_size());
             base_storage.update_fn_call_stack_size(self.storage_manager.fn_call_stack_size());
         }
         self.storage_manager = base_storage;
+        self.storage_manager
+            .update_stack_size(max_branch_stack_size);
         let (_branch_info, stmt) = default_branch;
         self.build_stmt(stmt, ret_layout);

@@ -613,7 +618,7 @@ impl<
         remainder: &'a Stmt<'a>,
         ret_layout: &Layout<'a>,
     ) {
-        // Free everything to the stack to make sure they don't get messed with in the branch.
+        // Free everything to the stack to make sure nothing gets messed up when looping back to this point.
         // TODO: look into a nicer solution.
         self.storage_manager.free_all_to_stack(&mut self.buf);

@@ -625,7 +630,6 @@ impl<
         self.join_map.insert(*id, bumpalo::vec![in self.env.arena]);

         // Build remainder of function first. It is what gets run and jumps to join.
-        // self.storage_manager = base_storage;
         self.build_stmt(remainder, ret_layout);

         let join_location = self.buf.len() as u64;
@@ -959,7 +963,7 @@ impl<
         let list_alignment = list_layout.alignment_bytes(self.storage_manager.target_info());
         self.load_literal(
             &Symbol::DEV_TMP,
-            &u32_layout,
+            u32_layout,
             &Literal::Int(list_alignment as i128),
         );

@@ -978,13 +982,13 @@ impl<
         let elem_stack_size = elem_layout.stack_size(self.storage_manager.target_info());
         self.load_literal(
             &Symbol::DEV_TMP3,
-            &u64_layout,
+            u64_layout,
             &Literal::Int(elem_stack_size as i128),
         );

         // Setup the return location.
         let base_offset = self.storage_manager.claim_stack_area(
-            &dst,
+            dst,
             ret_layout.stack_size(self.storage_manager.target_info()),
         );

diff --git a/compiler/gen_dev/src/generic64/storage.rs b/compiler/gen_dev/src/generic64/storage.rs
index 7bd3c79222..be5eeaea8a 100644
--- a/compiler/gen_dev/src/generic64/storage.rs
+++ b/compiler/gen_dev/src/generic64/storage.rs
@@ -713,7 +713,7 @@ impl<
     }

     /// Copies a complex symbol on the stack to the arg pointer.
-    pub fn copy_symbol_to_arg_pionter(
+    pub fn copy_symbol_to_arg_pointer(
         &mut self,
         buf: &mut Vec<'a, u8>,
         sym: &Symbol,
diff --git a/compiler/gen_dev/src/generic64/x86_64.rs b/compiler/gen_dev/src/generic64/x86_64.rs
index d2a174923f..2eff83ecd2 100644
--- a/compiler/gen_dev/src/generic64/x86_64.rs
+++ b/compiler/gen_dev/src/generic64/x86_64.rs
@@ -440,7 +440,7 @@ impl CallConv for X86_64Syste
             }
             _ => {
                 // This is a large type returned via the arg pointer.
-                storage_manager.copy_symbol_to_arg_pionter(buf, sym, layout);
+                storage_manager.copy_symbol_to_arg_pointer(buf, sym, layout);
                 // Also set the return reg to the arg pointer.
                 storage_manager.load_to_specified_general_reg(
                     buf,
@@ -1599,7 +1599,7 @@ fn movsd_freg64_rip_offset32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, offset:
     buf.extend(&offset.to_le_bytes());
 }

-/// `MOVSD r/m64,xmm1` -> Move xmm1 to r/m64. where m64 references the base pionter.
+/// `MOVSD r/m64,xmm1` -> Move xmm1 to r/m64, where m64 references the base pointer.
 #[inline(always)]
 fn movsd_base64_offset32_freg64(
     buf: &mut Vec<'_, u8>,
@@ -1622,7 +1622,7 @@ fn movsd_base64_offset32_freg64(
     buf.extend(&offset.to_le_bytes());
 }

-/// `MOVSD xmm1,r/m64` -> Move r/m64 to xmm1. where m64 references the base pionter.
+/// `MOVSD xmm1,r/m64` -> Move r/m64 to xmm1, where m64 references the base pointer.
 #[inline(always)]
 fn movsd_freg64_base64_offset32(
     buf: &mut Vec<'_, u8>,
diff --git a/compiler/gen_dev/src/lib.rs b/compiler/gen_dev/src/lib.rs
index c4ed9edf83..5daa75e0b4 100644
--- a/compiler/gen_dev/src/lib.rs
+++ b/compiler/gen_dev/src/lib.rs
@@ -336,7 +336,7 @@ trait Backend<'a> {
                 ..
             } => {
                 self.load_literal_symbols(arguments);
-                self.tag(sym, &arguments, tag_layout, *tag_id);
+                self.tag(sym, arguments, tag_layout, *tag_id);
             }
             x => todo!("the expression, {:?}", x),
         }
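Note on the switch-lowering hunks above: every branch is code-generated from a clone of the storage state taken before any branch, so the branches' stack usage is independent rather than cumulative. The merged state still has to reserve a frame large enough for whichever branch used the most stack, which is why the per-branch `update_stack_size` on `base_storage` is replaced by tracking a single maximum and applying it once after the loop. The sketch below is a minimal model of that pattern only; `StorageManager`, `Branch`, and `build_switch_branches` here are simplified, hypothetical stand-ins, not the real gen_dev API.

    // Minimal model of the fix; hypothetical stand-ins for the real types
    // in compiler/gen_dev/src/generic64/{mod,storage}.rs.
    #[derive(Clone)]
    struct StorageManager {
        stack_size: u32,
    }

    impl StorageManager {
        /// Grows the tracked stack size; never shrinks it.
        fn update_stack_size(&mut self, size: u32) {
            self.stack_size = self.stack_size.max(size);
        }
    }

    /// Stand-in for one `(val, branch_info, stmt)` switch branch.
    struct Branch {
        stack_needed: u32,
    }

    /// Builds every branch from the same pre-switch state and keeps only
    /// the maximum stack size any single branch needed.
    fn build_switch_branches(storage: &mut StorageManager, branches: &[Branch]) {
        let base_storage = storage.clone();
        let mut max_branch_stack_size = 0;

        for branch in branches {
            // Each branch starts from the shared pre-branch state, so one
            // branch's allocations never shift the next branch's layout.
            *storage = base_storage.clone();
            storage.update_stack_size(branch.stack_needed); // stands in for build_stmt

            // Remember the peak instead of folding it back into base_storage,
            // which would leak one branch's size into the next clone.
            max_branch_stack_size = max_branch_stack_size.max(storage.stack_size);
        }

        // Restore the shared state, then reserve a frame that covers the
        // hungriest branch.
        *storage = base_storage;
        storage.update_stack_size(max_branch_stack_size);
    }

    fn main() {
        let mut storage = StorageManager { stack_size: 16 };
        let branches = [
            Branch { stack_needed: 24 },
            Branch { stack_needed: 64 },
            Branch { stack_needed: 8 },
        ];
        build_switch_branches(&mut storage, &branches);
        assert_eq!(storage.stack_size, 64); // the max across branches wins
    }

The same reasoning explains why `update_fn_call_stack_size` stays inside the loop in the actual patch: that value is already a running maximum on the shared state, so folding it in per branch is safe.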