simplify many methods that operate on different register widths

Folkert 2023-09-15 21:04:15 +02:00
parent 6cbe15bf9d
commit e33414938d
3 changed files with 162 additions and 239 deletions
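The commit collapses the per-width `mov_*` methods on the `Assembler` trait into single methods that take a `RegisterWidth` argument; the old width-specific names survive as trait-level default wrappers, so call sites keep compiling unchanged. A minimal sketch of the pattern (simplified: std `Vec<u8>` and a bare `u8` register stand in for the real buffer and register types used in the diff below):

    #[derive(Debug, Clone, Copy)]
    pub enum RegisterWidth {
        W8 = 0b00,
        W16 = 0b01,
        W32 = 0b10,
        W64 = 0b11,
    }

    pub trait Assembler {
        // One required method; each backend implements the width dispatch once.
        fn mov_reg_base32(buf: &mut Vec<u8>, register_width: RegisterWidth, dst: u8, offset: i32);

        // The old per-width entry points become default methods, so existing
        // call sites keep compiling without modification.
        fn mov_reg64_base32(buf: &mut Vec<u8>, dst: u8, offset: i32) {
            Self::mov_reg_base32(buf, RegisterWidth::W64, dst, offset)
        }
        fn mov_reg8_base32(buf: &mut Vec<u8>, dst: u8, offset: i32) {
            Self::mov_reg_base32(buf, RegisterWidth::W8, dst, offset)
        }
    }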

File 1 of 3: AArch64 backend (impl Assembler for AArch64Assembler)

@@ -1226,28 +1226,33 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
     }
 
     #[inline(always)]
-    fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, offset: i32) {
+    fn mov_reg_mem_offset32(
+        buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        dst: AArch64GeneralReg,
+        src: AArch64GeneralReg,
+        offset: i32,
+    ) {
         if offset < 0 {
-            ldur_reg64_reg64_imm9(buf, dst, AArch64GeneralReg::FP, offset as i16);
+            ldur_reg_reg_imm9(buf, register_width, dst, src, offset as i16);
         } else if offset < (0xFFF << 8) {
             debug_assert!(offset % 8 == 0);
-            ldr_reg64_reg64_imm12(buf, dst, AArch64GeneralReg::FP, (offset as u16) >> 3);
+            ldr_reg_reg_imm12(buf, register_width, dst, src, (offset as u16) >> 3);
         } else {
             todo!("base offsets over 32k for AArch64");
         }
     }
 
     #[inline(always)]
-    fn mov_reg32_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _offset: i32) {
-        todo!()
-    }
-    #[inline(always)]
-    fn mov_reg16_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _offset: i32) {
-        todo!()
-    }
-    #[inline(always)]
-    fn mov_reg8_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _offset: i32) {
-        todo!()
+    fn mov_reg_base32(
+        buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        dst: AArch64GeneralReg,
+        offset: i32,
+    ) {
+        Self::mov_reg_mem_offset32(buf, register_width, dst, AArch64GeneralReg::FP, offset)
     }
 
     #[inline(always)]
     fn mov_base32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64FloatReg) {
         Self::mov_mem64_offset32_freg64(buf, AArch64GeneralReg::FP, offset, src)
@@ -1268,14 +1273,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
     #[inline(always)]
     fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GeneralReg) {
-        if offset < 0 {
-            str_reg64_reg64_imm9(buf, src, AArch64GeneralReg::FP, offset as i16);
-        } else if offset < (0xFFF << 8) {
-            debug_assert!(offset % 8 == 0);
-            str_reg64_reg64_imm12(buf, src, AArch64GeneralReg::FP, (offset as u16) >> 3);
-        } else {
-            todo!("base offsets over 32k for AArch64");
-        }
+        Self::mov_mem64_offset32_reg64(buf, AArch64GeneralReg::FP, offset, src)
     }
 
     #[inline(always)]
@@ -1291,71 +1289,20 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
         todo!()
     }
 
-    #[inline(always)]
-    fn mov_reg64_mem64_offset32(
-        buf: &mut Vec<'_, u8>,
-        dst: AArch64GeneralReg,
-        src: AArch64GeneralReg,
-        offset: i32,
-    ) {
-        if offset < 0 {
-            todo!("negative mem offsets for AArch64");
-        } else if offset < (0xFFF << 8) {
-            debug_assert!(offset % 8 == 0);
-            ldr_reg64_reg64_imm12(buf, dst, src, (offset as u16) >> 3);
-        } else {
-            todo!("mem offsets over 32k for AArch64");
-        }
-    }
-
-    #[inline(always)]
-    fn mov_reg32_mem32_offset32(
-        buf: &mut Vec<'_, u8>,
-        dst: AArch64GeneralReg,
-        src: AArch64GeneralReg,
-        offset: i32,
-    ) {
-        if offset < 0 {
-            todo!("negative mem offsets for AArch64");
-        } else if offset < (0xFFF << 8) {
-            debug_assert!(offset % 8 == 0);
-            ldr_reg64_reg64_imm12(buf, dst, src, (offset as u16) >> 3);
-        } else {
-            todo!("mem offsets over 32k for AArch64");
-        }
-    }
-
-    #[inline(always)]
-    fn mov_reg16_mem16_offset32(
-        _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GeneralReg,
-        _src: AArch64GeneralReg,
-        _offset: i32,
-    ) {
-        todo!()
-    }
-
-    #[inline(always)]
-    fn mov_reg8_mem8_offset32(
-        _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GeneralReg,
-        _src: AArch64GeneralReg,
-        _offset: i32,
-    ) {
-        todo!()
-    }
-
-    #[inline(always)]
-    fn mov_mem64_offset32_reg64(
+    fn mov_mem_offset32_reg(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: AArch64GeneralReg,
         offset: i32,
         src: AArch64GeneralReg,
     ) {
         if offset < 0 {
-            todo!("negative mem offsets for AArch64");
+            str_reg_reg_imm9(buf, register_width, src, dst, offset as i16);
         } else if offset < (0xFFF << 8) {
             debug_assert!(offset % 8 == 0);
-            str_reg64_reg64_imm12(buf, src, dst, (offset as u16) >> 3);
+            str_reg_reg_imm12(buf, register_width, src, dst, (offset as u16) >> 3);
         } else {
-            todo!("mem offsets over 32k for AArch64");
+            todo!("base offsets over 32k for AArch64");
         }
     }
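Both consolidated AArch64 helpers choose between the two load/store immediate encodings based on the offset, and both directions previously bailed with todo!() on negative pointer offsets; the merged versions route those through the unscaled-offset instructions instead. A small illustration of that dispatch (hypothetical helper, mirroring the checks above):

    // Hypothetical helper mirroring the dispatch above: negative offsets use
    // the unscaled signed 9-bit form (LDUR/STUR); non-negative offsets use the
    // unsigned 12-bit form (LDR/STR), which stores the offset scaled down by 8.
    fn addressing_form(offset: i32) -> &'static str {
        if offset < 0 {
            // imm9 must fit in -256..256 (the encoders assert this)
            "ldur/stur, imm9 = offset"
        } else if offset < (0xFFF << 8) {
            debug_assert!(offset % 8 == 0);
            // e.g. offset 0x18 encodes as imm12 = 0x18 >> 3 = 3
            "ldr/str, imm12 = offset >> 3"
        } else {
            "unhandled: todo!(\"base offsets over 32k for AArch64\")"
        }
    }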
@@ -1376,36 +1323,6 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
         }
     }
 
-    #[inline(always)]
-    fn mov_mem32_offset32_reg32(
-        _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GeneralReg,
-        _offset: i32,
-        _src: AArch64GeneralReg,
-    ) {
-        todo!()
-    }
-
-    #[inline(always)]
-    fn mov_mem16_offset32_reg16(
-        _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GeneralReg,
-        _offset: i32,
-        _src: AArch64GeneralReg,
-    ) {
-        todo!()
-    }
-
-    #[inline(always)]
-    fn mov_mem8_offset32_reg8(
-        _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GeneralReg,
-        _offset: i32,
-        _src: AArch64GeneralReg,
-    ) {
-        todo!()
-    }
-
     #[inline(always)]
     fn movsx_reg_base32(
         buf: &mut Vec<'_, u8>,
@@ -1442,14 +1359,13 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
     }
 
     #[inline(always)]
     fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, offset: i32) {
-        if offset < 0 {
-            todo!("negative stack offsets for AArch64");
-        } else if offset < (0xFFF << 8) {
-            debug_assert!(offset % 8 == 0);
-            ldr_reg64_reg64_imm12(buf, dst, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
-        } else {
-            todo!("stack offsets over 32k for AArch64");
-        }
+        Self::mov_reg_mem_offset32(
+            buf,
+            RegisterWidth::W64,
+            dst,
+            AArch64GeneralReg::ZRSP,
+            offset,
+        )
     }
 
     #[inline(always)]
     fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64FloatReg) {
@@ -1459,25 +1375,11 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
     #[inline(always)]
     fn mov_stack32_reg(
         buf: &mut Vec<'_, u8>,
-        register_width: RegisterWidth,
+        _register_width: RegisterWidth,
         offset: i32,
         src: AArch64GeneralReg,
     ) {
-        match register_width {
-            RegisterWidth::W8 => todo!(),
-            RegisterWidth::W16 => todo!(),
-            RegisterWidth::W32 => todo!(),
-            RegisterWidth::W64 => {
-                if offset < 0 {
-                    todo!("negative stack offsets for AArch64");
-                } else if offset < (0xFFF << 8) {
-                    debug_assert!(offset % 8 == 0);
-                    str_reg64_reg64_imm12(buf, src, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
-                } else {
-                    todo!("stack offsets over 32k for AArch64");
-                }
-            }
-        }
+        Self::mov_mem64_offset32_reg64(buf, AArch64GeneralReg::ZRSP, offset, src)
     }
 
     #[inline(always)]
     fn neg_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64GeneralReg) {
@@ -2987,14 +2889,15 @@ fn eor_reg64_reg64_reg64(
 /// `LDR Xt, [Xn, #offset]` -> Load Xn + Offset Xt. ZRSP is SP.
 /// Note: imm12 is the offset divided by 8.
 #[inline(always)]
-fn ldr_reg64_reg64_imm12(
+fn ldr_reg_reg_imm12(
     buf: &mut Vec<'_, u8>,
+    register_width: RegisterWidth,
     dst: AArch64GeneralReg,
     base: AArch64GeneralReg,
     imm12: u16,
 ) {
     let inst = LoadStoreRegisterImmediate::new_load(LoadStoreRegisterImmediateParams {
-        size: 0b11,
+        size: register_width as u8,
         imm12,
         rn: base,
         rt: dst,
@@ -3004,8 +2907,9 @@ fn ldr_reg64_reg64_imm12(
 }
 
 #[inline(always)]
-fn ldur_reg64_reg64_imm9(
+fn ldur_reg_reg_imm9(
     buf: &mut Vec<'_, u8>,
+    register_width: RegisterWidth,
     dst: AArch64GeneralReg,
     base: AArch64GeneralReg,
     imm9: i16,
@@ -3014,10 +2918,11 @@ fn ldur_reg64_reg64_imm9(
     assert!((-256..256).contains(&imm9));
     let imm9 = u16::from_ne_bytes(imm9.to_ne_bytes());
-    let imm12 = ((imm9 & 0b0001_1111_1111) << 2) | 0b00;
+    #[allow(clippy::identity_op)]
+    let imm12 = (imm9 & 0b0001_1111_1111) << 2 | 0b00;
 
     let inst = LoadStoreRegisterImmediate {
-        size: 0b11.into(), // 64-bit
+        size: (register_width as u8).into(), // 64-bit
         fixed: 0b111.into(),
         fixed2: false,
         fixed3: 0b00.into(),
@@ -3227,10 +3132,9 @@ fn sdiv_reg64_reg64_reg64(
     buf.extend(inst.bytes());
 }
 
-/// `STR Xt, [Xn, #offset]` -> Store Xt to Xn + Offset. ZRSP is SP.
-#[inline(always)]
-fn str_reg64_reg64_imm9(
+fn str_reg_reg_imm9(
     buf: &mut Vec<'_, u8>,
+    register_width: RegisterWidth,
     src: AArch64GeneralReg,
     base: AArch64GeneralReg,
     imm9: i16,
@@ -3242,7 +3146,7 @@ fn str_reg64_reg64_imm9(
     let imm12 = ((imm9 & 0b0001_1111_1111) << 2) | 0b11;
 
     let inst = LoadStoreRegisterImmediate {
-        size: 0b11.into(), // 64-bit
+        size: (register_width as u8).into(), // 64-bit
        fixed: 0b111.into(),
         fixed2: false,
         fixed3: 0b00.into(),
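In both imm9 forms, the two low bits appended after the shifted imm9 select the addressing sub-mode: the `| 0b00` in `ldur_reg_reg_imm9` produces the unscaled LDUR encoding, while the `| 0b11` here produces the pre-indexed store with writeback, which is why the matching disassembler test below expects a `!` suffix in the disassembly.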
@@ -3258,14 +3162,15 @@ fn str_reg64_reg64_imm9(
 /// `STR Xt, [Xn, #offset]` -> Store Xt to Xn + Offset. ZRSP is SP.
 /// Note: imm12 is the offset divided by 8.
 #[inline(always)]
-fn str_reg64_reg64_imm12(
+fn str_reg_reg_imm12(
     buf: &mut Vec<'_, u8>,
+    register_width: RegisterWidth,
     src: AArch64GeneralReg,
     base: AArch64GeneralReg,
     imm12: u16,
 ) {
     let inst = LoadStoreRegisterImmediate::new_store(LoadStoreRegisterImmediateParams {
-        size: 0b11,
+        size: register_width as u8,
         imm12,
         rn: base,
         rt: src,
@@ -4150,13 +4055,14 @@ mod tests {
     #[test]
     fn test_ldr_reg64_reg64_imm12() {
         disassembler_test!(
-            ldr_reg64_reg64_imm12,
-            |reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
+            ldr_reg_reg_imm12,
+            |_, reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
                 "ldr {}, [{}, #0x{:x}]",
                 reg1.capstone_string(UsesZR),
                 reg2.capstone_string(UsesSP),
                 imm << 3
             ),
+            [RegisterWidth::W64],
             ALL_GENERAL_REGS,
             ALL_GENERAL_REGS,
             [0x123]
@@ -4198,13 +4104,14 @@ mod tests {
     #[test]
     fn test_ldr_reg64_reg64_imm9() {
         disassembler_test!(
-            ldur_reg64_reg64_imm9,
-            |reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
+            ldur_reg_reg_imm9,
+            |_, reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
                 "ldur {}, [{}, {}]",
                 reg1.capstone_string(UsesZR),
                 reg2.capstone_string(UsesSP),
                 signed_hex_i16(imm),
             ),
+            [RegisterWidth::W64],
             ALL_GENERAL_REGS,
             ALL_GENERAL_REGS,
             [0x010, -0x010, 4, -4]
@@ -4400,13 +4307,14 @@ mod tests {
     #[test]
     fn test_str_reg64_reg64_imm12() {
         disassembler_test!(
-            str_reg64_reg64_imm12,
-            |reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
+            str_reg_reg_imm12,
+            |_, reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
                 "str {}, [{}, #0x{:x}]",
                 reg1.capstone_string(UsesZR),
                 reg2.capstone_string(UsesSP),
                 imm << 3
             ),
+            [RegisterWidth::W64],
             ALL_GENERAL_REGS,
             ALL_GENERAL_REGS,
             [0x123]
@@ -4448,13 +4356,14 @@ mod tests {
     #[test]
     fn test_str_reg64_reg64_imm9() {
         disassembler_test!(
-            str_reg64_reg64_imm9,
-            |reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
+            str_reg_reg_imm9,
+            |_, reg1: AArch64GeneralReg, reg2: AArch64GeneralReg, imm| format!(
                 "str {}, [{}, {}]!", // ! indicates writeback
                 reg1.capstone_string(UsesZR),
                 reg2.capstone_string(UsesSP),
                 signed_hex_i16(imm),
             ),
+            [RegisterWidth::W64],
             ALL_GENERAL_REGS,
             ALL_GENERAL_REGS,
             [4, -4]
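The disassembler tests need only mechanical updates: the expectation closure gains a leading `_` to absorb the new width argument, and a `[RegisterWidth::W64]` list supplies that argument's values, so every expected disassembly string stays identical. Presumably, covering narrower widths later would mean widening that list (e.g. adding `RegisterWidth::W32`) and using the width in the closure to pick `w` versus `x` register names; that is an assumption about the `disassembler_test!` macro, not something this commit does.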

File 2 of 3: shared generic64 code (RegisterWidth enum and the Assembler trait)

@@ -35,10 +35,10 @@ const JUMP_PLACEHOLDER: i32 = 0x0011_1100;
 #[derive(Debug, Clone, Copy)]
 pub enum RegisterWidth {
-    W8,
-    W16,
-    W32,
-    W64,
+    W8 = 0b00,
+    W16 = 0b01,
+    W32 = 0b10,
+    W64 = 0b11,
 }
 
 impl RegisterWidth {
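The explicit discriminants are the point of this hunk: they match the two-bit `size` field of AArch64 load/store instructions, which is what lets the encoders above write `size: register_width as u8` directly. As a quick check:

    // The discriminants line up with the AArch64 load/store "size" field.
    assert_eq!(RegisterWidth::W8 as u8, 0b00); // LDRB/STRB (byte)
    assert_eq!(RegisterWidth::W16 as u8, 0b01); // LDRH/STRH (halfword)
    assert_eq!(RegisterWidth::W32 as u8, 0b10); // LDR/STR Wt (word)
    assert_eq!(RegisterWidth::W64 as u8, 0b11); // LDR/STR Xt (doubleword)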
@@ -329,10 +329,25 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
     // base32 is similar to stack based instructions but they reference the base/frame pointer.
     fn mov_freg64_base32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
-    fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
-    fn mov_reg32_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
-    fn mov_reg16_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
-    fn mov_reg8_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
+    fn mov_reg_base32(
+        buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        dst: GeneralReg,
+        offset: i32,
+    );
+
+    fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32) {
+        Self::mov_reg_base32(buf, RegisterWidth::W64, dst, offset)
+    }
+    fn mov_reg32_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32) {
+        Self::mov_reg_base32(buf, RegisterWidth::W32, dst, offset)
+    }
+    fn mov_reg16_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32) {
+        Self::mov_reg_base32(buf, RegisterWidth::W16, dst, offset)
+    }
+    fn mov_reg8_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32) {
+        Self::mov_reg_base32(buf, RegisterWidth::W8, dst, offset)
+    }
 
     fn mov_base32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
     fn mov_base32_freg32(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
@@ -343,25 +358,46 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
     fn mov_base32_reg8(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
 
     // move from memory (a pointer) to register
+    fn mov_reg_mem_offset32(
+        buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        dst: GeneralReg,
+        src: GeneralReg,
+        offset: i32,
+    );
+
     fn mov_reg64_mem64_offset32(
         buf: &mut Vec<'_, u8>,
         dst: GeneralReg,
         src: GeneralReg,
         offset: i32,
-    );
+    ) {
+        Self::mov_reg_mem_offset32(buf, RegisterWidth::W64, dst, src, offset)
+    }
     fn mov_reg32_mem32_offset32(
         buf: &mut Vec<'_, u8>,
         dst: GeneralReg,
         src: GeneralReg,
         offset: i32,
-    );
+    ) {
+        Self::mov_reg_mem_offset32(buf, RegisterWidth::W32, dst, src, offset)
+    }
     fn mov_reg16_mem16_offset32(
         buf: &mut Vec<'_, u8>,
         dst: GeneralReg,
         src: GeneralReg,
         offset: i32,
-    );
-    fn mov_reg8_mem8_offset32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg, offset: i32);
+    ) {
+        Self::mov_reg_mem_offset32(buf, RegisterWidth::W16, dst, src, offset)
+    }
+    fn mov_reg8_mem8_offset32(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src: GeneralReg,
+        offset: i32,
+    ) {
+        Self::mov_reg_mem_offset32(buf, RegisterWidth::W8, dst, src, offset)
+    }
 
     fn mov_freg64_mem64_offset32(
         buf: &mut Vec<'_, u8>,
@@ -377,25 +413,46 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
     );
 
     // move from register to memory
+    fn mov_mem_offset32_reg(
+        buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        dst: GeneralReg,
+        offset: i32,
+        src: GeneralReg,
+    );
+
     fn mov_mem64_offset32_reg64(
         buf: &mut Vec<'_, u8>,
         dst: GeneralReg,
         offset: i32,
         src: GeneralReg,
-    );
+    ) {
+        Self::mov_mem_offset32_reg(buf, RegisterWidth::W64, dst, offset, src)
+    }
     fn mov_mem32_offset32_reg32(
         buf: &mut Vec<'_, u8>,
         dst: GeneralReg,
         offset: i32,
         src: GeneralReg,
-    );
+    ) {
+        Self::mov_mem_offset32_reg(buf, RegisterWidth::W32, dst, offset, src)
+    }
     fn mov_mem16_offset32_reg16(
         buf: &mut Vec<'_, u8>,
         dst: GeneralReg,
         offset: i32,
         src: GeneralReg,
-    );
-    fn mov_mem8_offset32_reg8(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32, src: GeneralReg);
+    ) {
+        Self::mov_mem_offset32_reg(buf, RegisterWidth::W16, dst, offset, src)
+    }
+    fn mov_mem8_offset32_reg8(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        offset: i32,
+        src: GeneralReg,
+    ) {
+        Self::mov_mem_offset32_reg(buf, RegisterWidth::W8, dst, offset, src)
+    }
 
     fn movesd_mem64_offset32_freg64(
         buf: &mut Vec<'_, u8>,
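With one required method plus defaults, each backend now implements two width-dispatching methods instead of eight width-specific ones, and the old and new spellings resolve to the same code. Illustrative calls (free-standing snippet; `buf`, `dst`, and `src` are assumed to be in scope):

    // Both emit the same 32-bit load: the default method simply forwards
    // with RegisterWidth::W32.
    X86_64Assembler::mov_reg32_mem32_offset32(&mut buf, dst, src, 8);
    X86_64Assembler::mov_reg_mem_offset32(&mut buf, RegisterWidth::W32, dst, src, 8);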

File 3 of 3: x86_64 backend (impl Assembler for X86_64Assembler)

@@ -2257,20 +2257,20 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
     }
 
     #[inline(always)]
-    fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
-        mov_reg64_base64_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
-    }
-    #[inline(always)]
-    fn mov_reg32_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
-        mov_reg32_base32_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
-    }
-    #[inline(always)]
-    fn mov_reg16_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
-        mov_reg16_base16_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
-    }
-    #[inline(always)]
-    fn mov_reg8_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
-        mov_reg8_base8_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
+    fn mov_reg_base32(
+        buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        dst: X86_64GeneralReg,
+        offset: i32,
+    ) {
+        use RegisterWidth::*;
+
+        match register_width {
+            W8 => mov_reg8_base8_offset32(buf, dst, X86_64GeneralReg::RBP, offset),
+            W16 => mov_reg16_base16_offset32(buf, dst, X86_64GeneralReg::RBP, offset),
+            W32 => mov_reg32_base32_offset32(buf, dst, X86_64GeneralReg::RBP, offset),
+            W64 => mov_reg64_base64_offset32(buf, dst, X86_64GeneralReg::RBP, offset),
+        }
     }
 
     #[inline(always)]
@@ -2311,78 +2311,35 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
     }
 
     #[inline(always)]
-    fn mov_reg64_mem64_offset32(
+    fn mov_reg_mem_offset32(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: X86_64GeneralReg,
         src: X86_64GeneralReg,
         offset: i32,
     ) {
-        mov_reg64_base64_offset32(buf, dst, src, offset)
-    }
-
-    #[inline(always)]
-    fn mov_reg32_mem32_offset32(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        src: X86_64GeneralReg,
-        offset: i32,
-    ) {
-        mov_reg32_base32_offset32(buf, dst, src, offset)
-    }
-
-    fn mov_reg16_mem16_offset32(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        src: X86_64GeneralReg,
-        offset: i32,
-    ) {
-        mov_reg16_base16_offset32(buf, dst, src, offset)
-    }
-
-    fn mov_reg8_mem8_offset32(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        src: X86_64GeneralReg,
-        offset: i32,
-    ) {
-        mov_reg8_base8_offset32(buf, dst, src, offset)
+        match register_width {
+            RegisterWidth::W8 => mov_reg8_base8_offset32(buf, dst, src, offset),
+            RegisterWidth::W16 => mov_reg16_base16_offset32(buf, dst, src, offset),
+            RegisterWidth::W32 => mov_reg32_base32_offset32(buf, dst, src, offset),
+            RegisterWidth::W64 => mov_reg64_base64_offset32(buf, dst, src, offset),
+        }
     }
 
     #[inline(always)]
-    fn mov_mem64_offset32_reg64(
+    fn mov_mem_offset32_reg(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: X86_64GeneralReg,
         offset: i32,
         src: X86_64GeneralReg,
     ) {
-        mov_base64_offset32_reg64(buf, dst, offset, src)
-    }
-
-    #[inline(always)]
-    fn mov_mem32_offset32_reg32(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        offset: i32,
-        src: X86_64GeneralReg,
-    ) {
-        mov_base32_offset32_reg32(buf, dst, offset, src)
-    }
-
-    #[inline(always)]
-    fn mov_mem16_offset32_reg16(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        offset: i32,
-        src: X86_64GeneralReg,
-    ) {
-        mov_base16_offset32_reg16(buf, dst, offset, src)
-    }
-
-    #[inline(always)]
-    fn mov_mem8_offset32_reg8(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        offset: i32,
-        src: X86_64GeneralReg,
-    ) {
-        mov_base8_offset32_reg8(buf, dst, offset, src)
+        match register_width {
+            RegisterWidth::W8 => mov_base8_offset32_reg8(buf, dst, offset, src),
+            RegisterWidth::W16 => mov_base16_offset32_reg16(buf, dst, offset, src),
+            RegisterWidth::W32 => mov_base32_offset32_reg32(buf, dst, offset, src),
+            RegisterWidth::W64 => mov_base64_offset32_reg64(buf, dst, offset, src),
+        }
     }
 
     #[inline(always)]
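Unlike AArch64, where the width threads straight into the instruction's two-bit `size` field, the x86_64 backend keeps its four per-width emitters and dispatches with a `match`: the widths use genuinely different encodings on x86-64 (distinct opcodes for 8-bit operands, a `0x66` operand-size prefix for 16-bit, and `REX.W` for 64-bit), so there is no single field to parameterize.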