Optimize away unnecessary jump right before return

This commit is contained in:
Brendan Hansknecht 2021-09-21 00:14:13 -07:00
parent 54e2792b12
commit 1fb0c8043f
2 changed files with 45 additions and 7 deletions

View file

@ -340,20 +340,48 @@ impl<
let setup_offset = out.len(); let setup_offset = out.len();
// Deal with jumps to the return address. // Deal with jumps to the return address.
let ret_offset = self.buf.len();
let old_relocs = std::mem::replace(&mut self.relocs, bumpalo::vec![in self.env.arena]); let old_relocs = std::mem::replace(&mut self.relocs, bumpalo::vec![in self.env.arena]);
// Check if there is an unnecessary jump to return right at the end of the function.
let mut end_jmp_size = 0;
for reloc in old_relocs
.iter()
.filter(|reloc| matches!(reloc, Relocation::JmpToReturn { .. }))
{
if let Relocation::JmpToReturn {
inst_loc,
inst_size,
..
} = reloc
{
if *inst_loc as usize + *inst_size as usize == self.buf.len() {
end_jmp_size = *inst_size as usize;
break;
}
}
}
// Update jumps to returns.
let ret_offset = self.buf.len() - end_jmp_size;
let mut tmp = bumpalo::vec![in self.env.arena]; let mut tmp = bumpalo::vec![in self.env.arena];
for reloc in old_relocs for reloc in old_relocs
.iter() .iter()
.filter(|reloc| matches!(reloc, Relocation::JmpToReturn { .. })) .filter(|reloc| matches!(reloc, Relocation::JmpToReturn { .. }))
{ {
if let Relocation::JmpToReturn { inst_loc, offset } = reloc { if let Relocation::JmpToReturn {
inst_loc,
inst_size,
offset,
} = reloc
{
if *inst_loc as usize + *inst_size as usize != self.buf.len() {
self.update_jmp_imm32_offset(&mut tmp, *inst_loc, *offset, ret_offset as u64); self.update_jmp_imm32_offset(&mut tmp, *inst_loc, *offset, ret_offset as u64);
} }
} }
}
// Add function body. // Add function body.
out.extend(&self.buf); out.extend(&self.buf[..self.buf.len() - end_jmp_size]);
// Cleanup stack. // Cleanup stack.
CC::cleanup_stack( CC::cleanup_stack(
@ -606,8 +634,13 @@ impl<
offset: offset + sub_func_offset, offset: offset + sub_func_offset,
name, name,
}, },
Relocation::JmpToReturn { inst_loc, offset } => Relocation::JmpToReturn { Relocation::JmpToReturn {
inst_loc,
inst_size,
offset,
} => Relocation::JmpToReturn {
inst_loc: inst_loc + sub_func_offset, inst_loc: inst_loc + sub_func_offset,
inst_size,
offset: offset + sub_func_offset, offset: offset + sub_func_offset,
}, },
})); }));
@ -1017,8 +1050,11 @@ impl<
} }
let inst_loc = self.buf.len() as u64; let inst_loc = self.buf.len() as u64;
let offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678) as u64; let offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678) as u64;
self.relocs self.relocs.push(Relocation::JmpToReturn {
.push(Relocation::JmpToReturn { inst_loc, offset }); inst_loc,
inst_size: self.buf.len() as u64 - inst_loc,
offset,
});
Ok(()) Ok(())
} }
} }
@ -1384,6 +1420,7 @@ impl<
Ok(()) Ok(())
} }
// Updates a jump instruction to a new offset and returns the number of bytes written.
fn update_jmp_imm32_offset( fn update_jmp_imm32_offset(
&mut self, &mut self,
tmp: &mut Vec<'a, u8>, tmp: &mut Vec<'a, u8>,

View file

@ -48,6 +48,7 @@ pub enum Relocation {
}, },
JmpToReturn { JmpToReturn {
inst_loc: u64, inst_loc: u64,
inst_size: u64,
offset: u64, offset: u64,
}, },
} }