dev: box and unbox 16-bit and 8-bit values

This commit is contained in:
Folkert 2023-02-11 15:31:35 +01:00
parent 49d52f5d9a
commit ee79a311f9
No known key found for this signature in database
GPG key ID: 1F17F6FFD112B97C
4 changed files with 299 additions and 12 deletions

View file

@@ -656,6 +656,25 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("mem offsets over 32k for AArch64");
}
}
/// Load a 16-bit value at `src + offset` into `dst`.
/// Not yet implemented for AArch64 -- this stub panics via `todo!()`;
/// the x86_64 backend provides the working counterpart.
#[inline(always)]
fn mov_reg16_mem16_offset32(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GeneralReg,
_src: AArch64GeneralReg,
_offset: i32,
) {
todo!()
}
/// Load an 8-bit value at `src + offset` into `dst`.
/// Not yet implemented for AArch64 -- this stub panics via `todo!()`;
/// the x86_64 backend provides the working counterpart.
#[inline(always)]
fn mov_reg8_mem8_offset32(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GeneralReg,
_src: AArch64GeneralReg,
_offset: i32,
) {
todo!()
}
#[inline(always)]
fn mov_mem64_offset32_reg64(
buf: &mut Vec<'_, u8>,
@@ -675,19 +694,32 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
/// Store the low 32 bits of `src` at `dst + offset`.
///
/// NOTE(review): this span was a corrupted diff merge (old and new lines
/// interleaved with no +/- markers); reconstructed as the post-change stub.
/// The removed body used `str_reg64_reg64_imm12` -- a 64-bit store -- which
/// would write 8 bytes for a 4-byte move and asserted 8-byte offset
/// alignment, so it was replaced with `todo!()` pending a 32-bit `str`
/// encoding.
#[inline(always)]
fn mov_mem32_offset32_reg32(
    _buf: &mut Vec<'_, u8>,
    _dst: AArch64GeneralReg,
    _offset: i32,
    _src: AArch64GeneralReg,
) {
    todo!()
}
/// Store the low 16 bits of `src` at `dst + offset`.
/// Not yet implemented for AArch64 -- this stub panics via `todo!()`.
#[inline(always)]
fn mov_mem16_offset32_reg16(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GeneralReg,
_offset: i32,
_src: AArch64GeneralReg,
) {
todo!()
}
/// Store the low 8 bits of `src` at `dst + offset`.
/// Not yet implemented for AArch64 -- this stub panics via `todo!()`.
#[inline(always)]
fn mov_mem8_offset32_reg8(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GeneralReg,
_offset: i32,
_src: AArch64GeneralReg,
) {
todo!()
}
#[inline(always)]

View file

@@ -253,6 +253,13 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src: GeneralReg,
offset: i32,
);
/// Load a 16-bit value at `src + offset` into `dst`.
fn mov_reg16_mem16_offset32(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src: GeneralReg,
offset: i32,
);
/// Load an 8-bit value at `src + offset` into `dst`.
fn mov_reg8_mem8_offset32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg, offset: i32);
// move from register to memory
fn mov_mem64_offset32_reg64(
@@ -267,6 +274,13 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
offset: i32,
src: GeneralReg,
);
/// Store the low 16 bits of `src` at `dst + offset` (register -> memory).
fn mov_mem16_offset32_reg16(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
offset: i32,
src: GeneralReg,
);
/// Store the low 8 bits of `src` at `dst + offset` (register -> memory).
fn mov_mem8_offset32_reg8(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32, src: GeneralReg);
/// Sign extends the data at `offset` with `size` as it copies it to `dst`
/// size must be less than or equal to 8.
@@ -2026,6 +2040,14 @@ impl<
let sym_reg = storage_manager.load_to_general_reg(&mut self.buf, &value);
ASM::mov_mem32_offset32_reg32(&mut self.buf, ptr_reg, element_offset, sym_reg);
}
Layout::Builtin(Builtin::Int(IntWidth::I16 | IntWidth::U16)) => {
let sym_reg = storage_manager.load_to_general_reg(&mut self.buf, &value);
ASM::mov_mem16_offset32_reg16(&mut self.buf, ptr_reg, element_offset, sym_reg);
}
Layout::Builtin(Builtin::Int(IntWidth::I8 | IntWidth::U8) | Builtin::Bool) => {
let sym_reg = storage_manager.load_to_general_reg(&mut self.buf, &value);
ASM::mov_mem8_offset32_reg8(&mut self.buf, ptr_reg, element_offset, sym_reg);
}
_ if element_width == 0 => {}
_ if element_width > 8 => {
let (from_offset, size) = self.storage_manager.stack_offset_and_size(&value);
@@ -2078,6 +2100,14 @@ impl<
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, &dst);
ASM::mov_reg32_mem32_offset32(&mut self.buf, dst_reg, ptr_reg, 0);
}
Layout::U16 | Layout::I16 => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, &dst);
ASM::mov_reg16_mem16_offset32(&mut self.buf, dst_reg, ptr_reg, 0);
}
Layout::U8 | Layout::I8 | Layout::BOOL => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, &dst);
ASM::mov_reg8_mem8_offset32(&mut self.buf, dst_reg, ptr_reg, 0);
}
Layout::STR => {
self.storage_manager.with_tmp_general_reg(
&mut self.buf,

View file

@@ -1302,6 +1302,22 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
) {
mov_reg32_base32_offset32(buf, dst, src, offset)
}
/// Load a 16-bit value at `src + offset` into `dst`
/// (delegates to the `MOV r16, r/m16` base+offset encoder).
#[inline(always)] // added: every sibling Assembler method in this impl is inline(always)
fn mov_reg16_mem16_offset32(
    buf: &mut Vec<'_, u8>,
    dst: X86_64GeneralReg,
    src: X86_64GeneralReg,
    offset: i32,
) {
    mov_reg16_base16_offset32(buf, dst, src, offset)
}
/// Load an 8-bit value at `src + offset` into `dst`
/// (delegates to the `MOV r8, r/m8` base+offset encoder).
#[inline(always)] // added: every sibling Assembler method in this impl is inline(always)
fn mov_reg8_mem8_offset32(
    buf: &mut Vec<'_, u8>,
    dst: X86_64GeneralReg,
    src: X86_64GeneralReg,
    offset: i32,
) {
    mov_reg8_base8_offset32(buf, dst, src, offset)
}
#[inline(always)]
fn mov_mem64_offset32_reg64(
@@ -1323,6 +1339,26 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
mov_base32_offset32_reg32(buf, dst, offset, src)
}
/// Store the low 16 bits of `src` at `dst + offset`
/// (delegates to the `MOV r/m16, r16` base+offset encoder).
#[inline(always)]
fn mov_mem16_offset32_reg16(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
offset: i32,
src: X86_64GeneralReg,
) {
mov_base16_offset32_reg16(buf, dst, offset, src)
}
/// Store the low 8 bits of `src` at `dst + offset`
/// (delegates to the `MOV r/m8, r8` base+offset encoder).
#[inline(always)]
fn mov_mem8_offset32_reg8(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
offset: i32,
src: X86_64GeneralReg,
) {
mov_base8_offset32_reg8(buf, dst, offset, src)
}
#[inline(always)]
fn movsx_reg64_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32, size: u8) {
debug_assert!(size <= 8);
@@ -1605,6 +1641,9 @@ impl X86_64Assembler {
push_reg64(buf, reg);
}
}
// 0x66 operand-size override prefix: selects the 16-bit form of an instruction.
// NOTE(review): 0x66 is conventionally listed as prefix group 3 (0x67
// address-size is group 4) -- confirm the intended naming.
const GRP_4: u8 = 0x66;
// Base REX prefix; extension bits are OR'd in via add_rm_extension/add_reg_extension.
const REX: u8 = 0x40;
// see https://wiki.osdev.org/X86-64_Instruction_Encoding#Encoding
@@ -2091,6 +2130,48 @@ fn mov_base32_offset32_reg32(
buf.extend(offset.to_le_bytes());
}
/// `MOV r/m16,r16` -> Move r16 to r/m16, where m16 references a base + offset.
#[inline(always)]
fn mov_base16_offset32_reg16(
    buf: &mut Vec<'_, u8>,
    base: X86_64GeneralReg,
    offset: i32,
    src: X86_64GeneralReg,
) {
    // REX prefix carries the extension bits of both the base and the source.
    let rex_prefix = add_reg_extension(src, add_rm_extension(base, REX));
    // ModRM byte: mod = 0b10 (disp32 follows), reg = src, rm = base.
    let mod_rm = 0x80 | ((src as u8 % 8) << 3) | (base as u8 % 8);
    buf.reserve(8);
    // GRP_4 (0x66) selects the 16-bit operand size; 0x89 is the store opcode.
    buf.extend([GRP_4, rex_prefix, 0x89, mod_rm]);
    // Using RSP or R12 as the base requires a secondary index (SIB) byte.
    if matches!(base, X86_64GeneralReg::RSP | X86_64GeneralReg::R12) {
        buf.push(0x24);
    }
    buf.extend(offset.to_le_bytes());
}
/// `MOV r/m8,r8` -> Move r8 to r/m8, where m8 references a base + offset.
#[inline(always)]
fn mov_base8_offset32_reg8(
    buf: &mut Vec<'_, u8>,
    base: X86_64GeneralReg,
    offset: i32,
    src: X86_64GeneralReg,
) {
    // REX prefix carries the extension bits of both the base and the source.
    let rex_prefix = add_reg_extension(src, add_rm_extension(base, REX));
    // ModRM byte: mod = 0b10 (disp32 follows), reg = src, rm = base.
    let mod_rm = 0x80 | ((src as u8 % 8) << 3) | (base as u8 % 8);
    buf.reserve(8);
    // 0x88 is the 8-bit store opcode.
    buf.extend([rex_prefix, 0x88, mod_rm]);
    // Using RSP or R12 as the base requires a secondary index (SIB) byte.
    if matches!(base, X86_64GeneralReg::RSP | X86_64GeneralReg::R12) {
        buf.push(0x24);
    }
    buf.extend(offset.to_le_bytes());
}
/// `MOV r64,r/m64` -> Move r/m64 to r64, where m64 references a base + offset.
#[inline(always)]
fn mov_reg64_base64_offset32(
@@ -2135,6 +2216,52 @@ fn mov_reg32_base32_offset32(
buf.extend(offset.to_le_bytes());
}
/// `MOV r/m16,r16` -> Move r16 to r/m16.
#[inline(always)]
fn mov_reg16_base16_offset32(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
base: X86_64GeneralReg,
offset: i32,
) {
let rex = add_rm_extension(base, REX);
let rex = add_reg_extension(dst, rex);
let dst_mod = (dst as u8 % 8) << 3; // (dst as u8 % 8) << 3;
let base_mod = base as u8 % 8;
buf.reserve(8);
buf.extend([0x66, rex, 0x8B, 0x80 | dst_mod | base_mod]);
// Using RSP or R12 requires a secondary index byte.
// if base == X86_64GeneralReg::RSP || base == X86_64GeneralReg::R12 {
if base == X86_64GeneralReg::RSP || base == X86_64GeneralReg::R12 {
buf.push(0x24);
}
buf.extend(offset.to_le_bytes());
}
/// `MOV r8,r/m8` -> Move r/m8 to r8, where m8 references a base + offset.
///
/// (Doc fixed: the old comment was copied from the 16-bit encoder and was
/// wrong on both width and direction -- opcode 0x8A is the 8-bit load form,
/// as the disassembler test `mov reg, byte ptr [...]` confirms.)
#[inline(always)]
fn mov_reg8_base8_offset32(
    buf: &mut Vec<'_, u8>,
    dst: X86_64GeneralReg,
    base: X86_64GeneralReg,
    offset: i32,
) {
    let rex = add_rm_extension(base, REX);
    let rex = add_reg_extension(dst, rex);
    // ModRM byte: mod = 0b10 (disp32 follows), reg = dst, rm = base.
    let dst_mod = (dst as u8 % 8) << 3;
    let base_mod = base as u8 % 8;
    buf.reserve(8);
    buf.extend([rex, 0x8A, 0x80 | dst_mod | base_mod]);
    // Using RSP or R12 as the base requires a secondary index (SIB) byte.
    if base == X86_64GeneralReg::RSP || base == X86_64GeneralReg::R12 {
        buf.push(0x24);
    }
    buf.extend(offset.to_le_bytes());
}
/// `MOVZX r64,r/m8` -> Move r/m8 with zero extention to r64, where m8 references a base + offset.
#[inline(always)]
fn movzx_reg64_base8_offset32(
@@ -2935,6 +3062,38 @@ mod tests {
);
}
#[test]
// Encodes a 16-bit load for every (dst, base) general-register pair and a
// sample 32-bit offset, then checks the disassembled text matches the
// expected `mov reg16, word ptr [base + off]` form.
fn test_mov_reg16_base16_offset32() {
disassembler_test!(
mov_reg16_base16_offset32,
|reg1, reg2, imm| format!(
"mov {}, word ptr [{} + 0x{:x}]",
X86_64GeneralReg::low_16bits_string(&reg1),
reg2,
imm
),
ALL_GENERAL_REGS,
ALL_GENERAL_REGS,
[TEST_I32]
);
}
#[test]
// Encodes an 8-bit load for every (dst, base) general-register pair and a
// sample 32-bit offset, then checks the disassembled text matches the
// expected `mov reg8, byte ptr [base + off]` form.
fn test_mov_reg8_base8_offset32() {
disassembler_test!(
mov_reg8_base8_offset32,
|reg1, reg2, imm| format!(
"mov {}, byte ptr [{} + 0x{:x}]",
X86_64GeneralReg::low_8bits_string(&reg1),
reg2,
imm
),
ALL_GENERAL_REGS,
ALL_GENERAL_REGS,
[TEST_I32]
);
}
#[test]
fn test_mov_base64_offset32_reg64() {
disassembler_test!(
@@ -2946,6 +3105,54 @@ mod tests {
);
}
#[test]
// Encodes a 32-bit store for every (base, src) general-register pair and a
// sample 32-bit offset, then checks the disassembled text matches the
// expected `mov dword ptr [base + off], reg32` form.
fn test_mov_base32_offset32_reg32() {
disassembler_test!(
mov_base32_offset32_reg32,
|reg1, imm, reg2| format!(
"mov dword ptr [{} + 0x{:x}], {}",
reg1,
imm,
X86_64GeneralReg::low_32bits_string(&reg2),
),
ALL_GENERAL_REGS,
[TEST_I32],
ALL_GENERAL_REGS
);
}
#[test]
// Encodes a 16-bit store for every (base, src) general-register pair and a
// sample 32-bit offset, then checks the disassembled text matches the
// expected `mov word ptr [base + off], reg16` form.
fn test_mov_base16_offset32_reg16() {
disassembler_test!(
mov_base16_offset32_reg16,
|reg1, imm, reg2| format!(
"mov word ptr [{} + 0x{:x}], {}",
reg1,
imm,
X86_64GeneralReg::low_16bits_string(&reg2),
),
ALL_GENERAL_REGS,
[TEST_I32],
ALL_GENERAL_REGS
);
}
#[test]
// Encodes an 8-bit store for every (base, src) general-register pair and a
// sample 32-bit offset, then checks the disassembled text matches the
// expected `mov byte ptr [base + off], reg8` form.
fn test_mov_base8_offset32_reg8() {
disassembler_test!(
mov_base8_offset32_reg8,
|reg1, imm, reg2| format!(
"mov byte ptr [{} + 0x{:x}], {}",
reg1,
imm,
X86_64GeneralReg::low_8bits_string(&reg2),
),
ALL_GENERAL_REGS,
[TEST_I32],
ALL_GENERAL_REGS
);
}
#[test]
fn test_movzx_reg64_base8_offset32() {
disassembler_test!(