feat: refactor byte reversal into trait helper and inline some things

rvcas 2021-11-22 19:55:55 -05:00
parent 7f06d54816
commit e4861b679d
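
The diff below adds an Aarch64Bytes trait whose provided bytes() method packs an instruction struct and reverses the result: packed_struct with endian = "msb" emits the most significant byte first, while A64 instruction words are written little-endian into the output buffer, so every emit function previously did the pack/reverse/extend dance by hand. A minimal, self-contained sketch of the same provided-method pattern (EncodeLe and Demo are illustrative names, not part of this commit):

// Sketch only: EncodeLe and Demo are illustrative, not from the diff.
trait EncodeLe {
    // Each implementor supplies its MSB-first (big-endian) encoding...
    fn be_bytes(&self) -> [u8; 4];

    // ...and gets the little-endian form for free from this provided method,
    // mirroring what Aarch64Bytes::bytes does with pack_to_slice + reverse.
    fn le_bytes(&self) -> [u8; 4] {
        let mut bytes = self.be_bytes();
        bytes.reverse();
        bytes
    }
}

struct Demo(u32);

impl EncodeLe for Demo {
    fn be_bytes(&self) -> [u8; 4] {
        self.0.to_be_bytes()
    }
}

fn main() {
    let ret = Demo(0xD65F03C0); // RET X30, used here as an example word
    assert_eq!(ret.le_bytes(), [0xC0, 0x03, 0x5F, 0xD6]);
}

With the trait in place, each instruction struct only needs an empty impl line, which is what the hunks below add.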


@@ -47,6 +47,7 @@ pub enum AArch64GeneralReg {
 impl RegTrait for AArch64GeneralReg {}
 
 impl AArch64GeneralReg {
+    #[inline(always)]
     fn id(&self) -> u8 {
         *self as u8
     }
@@ -511,6 +512,21 @@ impl AArch64Assembler {}
 // https://developer.arm.com/documentation/ddi0487/ga
 // Map all instructions to a packed struct.
 
+trait Aarch64Bytes: PackedStruct {
+    const N: usize = 4;
+
+    #[inline(always)]
+    fn bytes(&self) -> [u8; 4] {
+        let mut bytes: [u8; 4] = [0, 0, 0, 0];
+
+        self.pack_to_slice(&mut bytes).unwrap();
+
+        bytes.reverse();
+
+        bytes
+    }
+}
+
 #[derive(PackedStruct, Debug)]
 #[packed_struct(endian = "msb")]
 pub struct MoveWideImmediate {
@@ -522,7 +538,10 @@ pub struct MoveWideImmediate {
     reg_d: Integer<u8, packed_bits::Bits<5>>, // AArch64GeneralReg
 }
 
+impl Aarch64Bytes for MoveWideImmediate {}
+
 impl MoveWideImmediate {
+    #[inline(always)]
     fn new(opc: u8, rd: AArch64GeneralReg, imm16: u16, hw: u8, sf: bool) -> Self {
         // TODO: revisit this is we change where we want to check the shift
         // currently this is done in the assembler above
@@ -554,7 +573,10 @@ pub struct ArithmeticImmediate {
     reg_d: Integer<u8, packed_bits::Bits<5>>,
 }
 
+impl Aarch64Bytes for ArithmeticImmediate {}
+
 impl ArithmeticImmediate {
+    #[inline(always)]
     fn new(
         op: bool,
         s: bool,
@@ -588,6 +610,7 @@ enum ShiftType {
 }
 
 impl ShiftType {
+    #[inline(always)]
     fn id(&self) -> u8 {
         *self as u8
     }
@@ -608,7 +631,10 @@ pub struct ArithmeticShifted {
     reg_d: Integer<u8, packed_bits::Bits<5>>,
 }
 
+impl Aarch64Bytes for ArithmeticShifted {}
+
 impl ArithmeticShifted {
+    #[inline(always)]
     fn new(
         op: bool,
         s: bool,
@@ -662,7 +688,10 @@ pub struct LogicalShiftedRegister {
     reg_d: Integer<u8, packed_bits::Bits<5>>,
 }
 
+impl Aarch64Bytes for LogicalShiftedRegister {}
+
 impl LogicalShiftedRegister {
+    #[inline(always)]
     fn new(
         op: LogicalOp,
         shift: ShiftType,
@@ -712,7 +741,10 @@ pub struct UnconditionalBranchRegister {
     fixed5: Integer<u8, packed_bits::Bits<5>>,
 }
 
+impl Aarch64Bytes for UnconditionalBranchRegister {}
+
 impl UnconditionalBranchRegister {
+    #[inline(always)]
     fn new(op: u8, rn: AArch64GeneralReg) -> Self {
         Self {
             fixed5: 0b00000.into(),
@@ -745,7 +777,10 @@ pub struct LoadStoreRegisterImmediate {
     rt: Integer<u8, packed_bits::Bits<5>>,
 }
 
+impl Aarch64Bytes for LoadStoreRegisterImmediate {}
+
 impl LoadStoreRegisterImmediate {
+    #[inline(always)]
     fn new(size: u8, opc: u8, imm12: u16, rn: AArch64GeneralReg, rt: AArch64GeneralReg) -> Self {
         debug_assert!(size <= 0b11);
         debug_assert!(imm12 <= 0xFFF);
@@ -762,10 +797,12 @@ impl LoadStoreRegisterImmediate {
         }
     }
 
+    #[inline(always)]
     fn new_load(size: u8, imm12: u16, rn: AArch64GeneralReg, rt: AArch64GeneralReg) -> Self {
         Self::new(size, 0b01, imm12, rn, rt)
     }
 
+    #[inline(always)]
     fn new_store(size: u8, imm12: u16, rn: AArch64GeneralReg, rt: AArch64GeneralReg) -> Self {
         Self::new(size, 0b00, imm12, rn, rt)
     }
@@ -787,11 +824,7 @@ fn add_reg64_reg64_imm12(
 ) {
     let inst = ArithmeticImmediate::new(false, false, dst, src, imm12, false);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `ADD Xd, Xm, Xn` -> Add Xm and Xn and place the result into Xd.
@@ -804,11 +837,7 @@ fn add_reg64_reg64_reg64(
 ) {
     let inst = ArithmeticShifted::new(false, false, ShiftType::LSL, 0, src1, src2, dst);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `LDR Xt, [Xn, #offset]` -> Load Xn + Offset Xt. ZRSP is SP.
@@ -822,11 +851,7 @@ fn ldr_reg64_imm12(
 ) {
     let inst = LoadStoreRegisterImmediate::new_load(0b11, imm12, base, dst);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `MOV Xd, Xm` -> Move Xm to Xd.
@@ -842,11 +867,7 @@ fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64Ge
         dst,
     );
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `MOVK Xd, imm16` -> Keeps Xd and moves an optionally shifted imm16 to Xd.
@@ -854,11 +875,7 @@ fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64Ge
 fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, hw: u8) {
     let inst = MoveWideImmediate::new(0b11, dst, imm16, hw, true);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `MOVZ Xd, imm16` -> Zeros Xd and moves an optionally shifted imm16 to Xd.
@@ -866,11 +883,7 @@ fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, h
 fn movz_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, hw: u8) {
     let inst = MoveWideImmediate::new(0b10, dst, imm16, hw, true);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `STR Xt, [Xn, #offset]` -> Store Xt to Xn + Offset. ZRSP is SP.
@@ -884,11 +897,7 @@ fn str_reg64_imm12(
 ) {
     let inst = LoadStoreRegisterImmediate::new_store(0b11, imm12, base, src);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `SUB Xd, Xn, imm12` -> Subtract Xn and imm12 and place the result into Xd.
@@ -901,11 +910,7 @@ fn sub_reg64_reg64_imm12(
 ) {
     let inst = ArithmeticImmediate::new(true, false, dst, src, imm12, false);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 /// `RET Xn` -> Return to the address stored in Xn.
@@ -913,11 +918,7 @@ fn sub_reg64_reg64_imm12(
 fn ret_reg64(buf: &mut Vec<'_, u8>, xn: AArch64GeneralReg) {
     let inst = UnconditionalBranchRegister::new(0b10, xn);
 
-    let mut bytes = inst.pack().unwrap();
-
-    bytes.reverse();
-
-    buf.extend(bytes);
+    buf.extend(inst.bytes());
 }
 
 #[cfg(test)]
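
A sketch of how the refactor could be sanity-checked inside the test module that follows; it is not part of this commit, and it assumes a register variant named AArch64GeneralReg::X0, which may differ from the actual enum:

#[test]
fn bytes_matches_pack_then_reverse() {
    // AArch64GeneralReg::X0 is assumed here purely for illustration.
    let inst = MoveWideImmediate::new(0b10, AArch64GeneralReg::X0, 0x1234, 0, true);

    // Old emit path: pack MSB-first, then reverse into little-endian order.
    let mut expected = inst.pack().unwrap();
    expected.reverse();

    // New path: the Aarch64Bytes provided method should produce the same bytes.
    assert_eq!(inst.bytes(), expected);
}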