py: Add store r8 and store r16 ops to asm_x86 and asm_x64.

parent 851f15f34c
commit d66e48662b
py/asmx64.c (28 lines changed)
@@ -49,6 +49,7 @@
 #define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
 #define OPCODE_MOV_I64_TO_R64 (0xb8) /* +rq */
 #define OPCODE_MOV_I32_TO_RM32 (0xc7)
+#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
 #define OPCODE_MOV_R64_TO_RM64 (0x89) /* /r */
 #define OPCODE_MOV_RM64_TO_R64 (0x8b)
 #define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */
@@ -85,6 +86,8 @@
 #define MODRM_RM_REG (0xc0)
 #define MODRM_RM_R64(x) ((x) & 0x7)
 
+#define OP_SIZE_PREFIX (0x66)
+
 #define REX_PREFIX (0x40)
 #define REX_W (0x08) // width
 #define REX_R (0x04) // register
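
Note (editor's addition, not part of the diff): 0x88 is the dedicated one-byte opcode for an 8-bit register store; 16-bit stores have no opcode of their own and instead reuse the 32/64-bit store opcode behind the 0x66 operand-size override prefix. The resulting forms:

    88 /r       mov r/m8, r8      OPCODE_MOV_R8_TO_RM8
    66 89 /r    mov r/m16, r16    OP_SIZE_PREFIX + OPCODE_MOV_R64_TO_RM64
    89 /r       mov r/m32, r32    default operand size
    48 89 /r    mov r/m64, r64    REX.W + the same opcode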
@@ -298,18 +301,28 @@ STATIC void asm_x64_ret(asm_x64_t *as) {
     asm_x64_write_byte_1(as, OPCODE_RET);
 }
 
-void asm_x64_mov_r32_to_r32(asm_x64_t *as, int src_r32, int dest_r32) {
-    // defaults to 32 bit operation
-    assert(src_r32 < 8);
-    assert(dest_r32 < 8);
-    asm_x64_write_byte_2(as, OPCODE_MOV_R64_TO_RM64, MODRM_R64(src_r32) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
-}
-
 void asm_x64_mov_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64) {
     // use REX prefix for 64 bit operation
     asm_x64_write_byte_3(as, REX_PREFIX | REX_W | (src_r64 < 8 ? 0 : REX_R) | (dest_r64 < 8 ? 0 : REX_B), OPCODE_MOV_R64_TO_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
 }
 
+void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    assert(dest_r64 < 8);
+    if (src_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R, OPCODE_MOV_R8_TO_RM8);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    assert(src_r64 < 8);
+    assert(dest_r64 < 8);
+    asm_x64_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R64_TO_RM64);
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
 void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
     // use REX prefix for 64 bit operation
     assert(dest_r64 < 8);
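
Note (editor's addition): two encoding details in the new asm_x64_mov_r8_to_disp are easy to miss. A REX prefix is emitted only when the source is r8-r15 (REX.R extends the ModRM reg field), and the presence of a REX byte also changes which 8-bit registers the numbers 4-7 select. A worked example under the file's rax=0 ... rdi=7, r8=8 ... numbering:

    // asm_x64_mov_r8_to_disp(as, 9 /* r9 */, 5 /* rbp */, 16) emits:
    //   44 88 4d 10    mov byte [rbp+16], r9b
    //   44 = REX_PREFIX|REX_R, 4d = ModRM(mod=01, reg=1, rm=rbp), 10 = disp8
    // Caveat: with no REX byte at all, reg numbers 4-7 select the legacy
    // high-byte registers AH/CH/DH/BH rather than SPL/BPL/SIL/DIL, so 8-bit
    // stores from rsp/rbp/rsi/rdi would need a bare REX (0x40) that this
    // helper does not emit.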
@@ -356,6 +369,7 @@ void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64) {
 }
 
 void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // TODO use movzx, movsx if possible
     if (UNSIGNED_FIT32(src_i64)) {
         // 5 bytes
         asm_x64_mov_i32_to_r64(as, src_i64 & 0xffffffff, dest_r64);
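
Note (editor's addition): the "5 bytes" shortcut works because a write to a 32-bit register implicitly zeroes the upper 32 bits of the full 64-bit register, so any constant that fits unsigned in 32 bits can use the short form:

    b8 78 56 34 12                   mov eax, 0x12345678   ; 5 bytes, rax = 0x12345678
    48 b8 78 56 34 12 00 00 00 00    mov rax, 0x12345678   ; 10-byte imm64 form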
py/asmx64.h (4 lines changed)
@@ -58,6 +58,7 @@
 #define ASM_X64_CC_JNZ (0x5)
+#define ASM_X64_CC_JNE (0x5)
 #define ASM_X64_CC_JL (0xc) // less, signed
 #define ASM_X64_CC_JG (0xf) // greater, signed
 
 typedef struct _asm_x64_t asm_x64_t;
 
@@ -75,6 +76,9 @@ void asm_x64_mov_r64_to_r64(asm_x64_t* as, int src_r64, int dest_r64);
 void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64);
+void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
 void asm_x64_xor_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64);
 void asm_x64_add_r64_to_r64(asm_x64_t* as, int src_r64, int dest_r64);
 void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b);
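
Note (editor's sketch, worked out from the opcode defines above): the three store widths differ only in prefix bytes. For a store of register 0 (rax) at [rbp+16]:

    asm_x64_mov_r8_to_disp(as, 0, 5, 16);   // 88 45 10       mov byte [rbp+16], al
    asm_x64_mov_r16_to_disp(as, 0, 5, 16);  // 66 89 45 10    mov word [rbp+16], ax
    asm_x64_mov_r64_to_disp(as, 0, 5, 16);  // 48 89 45 10    mov qword [rbp+16], rax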
py/asmx86.c (15 lines changed)
@@ -49,6 +49,7 @@
 //#define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
 #define OPCODE_MOV_I32_TO_R32 (0xb8)
 //#define OPCODE_MOV_I32_TO_RM32 (0xc7)
+#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
 #define OPCODE_MOV_R32_TO_RM32 (0x89)
 #define OPCODE_MOV_RM32_TO_R32 (0x8b)
 #define OPCODE_LEA_MEM_TO_R32 (0x8d) /* /r */
@@ -85,6 +86,8 @@
 #define MODRM_RM_REG (0xc0)
 #define MODRM_RM_R32(x) (x)
 
+#define OP_SIZE_PREFIX (0x66)
+
 #define IMM32_L0(x) ((x) & 0xff)
 #define IMM32_L1(x) (((x) >> 8) & 0xff)
 #define IMM32_L2(x) (((x) >> 16) & 0xff)
@@ -232,7 +235,17 @@ void asm_x86_mov_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32) {
     asm_x86_write_byte_2(as, OPCODE_MOV_R32_TO_RM32, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
 }
 
-STATIC void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+void asm_x86_mov_r8_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+void asm_x86_mov_r16_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R32_TO_RM32);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
     asm_x86_write_byte_1(as, OPCODE_MOV_R32_TO_RM32);
     asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
 }
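
Note (editor's addition): the x86 variant needs no prefix logic because there is no REX prefix on x86, but only register numbers 0-3 (eax/ecx/edx/ebx) have true low-byte forms; numbers 4-7 in the ModRM reg field encode AH/CH/DH/BH instead. A quick check of what gets emitted:

    // asm_x86_mov_r8_to_disp(as, 1 /* cl */, 3 /* ebx */, 4) emits:
    //   88 4b 04    mov byte [ebx+4], cl
    // passing 5 here would encode CH, not the low byte of ebp, so callers
    // must keep 8-bit store sources in eax/ecx/edx/ebx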
py/asmx86.h (4 lines changed)
@@ -59,6 +59,7 @@
 #define ASM_X86_CC_JNZ (0x5)
+#define ASM_X86_CC_JNE (0x5)
 #define ASM_X86_CC_JL (0xc) // less, signed
 #define ASM_X86_CC_JG (0xf) // greater, signed
 
 typedef struct _asm_x86_t asm_x86_t;
 
@@ -72,6 +73,9 @@ void* asm_x86_get_code(asm_x86_t* as);
 void asm_x86_mov_r32_to_r32(asm_x86_t* as, int src_r32, int dest_r32);
 void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
 void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32);
+void asm_x86_mov_r8_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r16_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
 void asm_x86_xor_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32);
 void asm_x86_add_r32_to_r32(asm_x86_t* as, int src_r32, int dest_r32);
 void asm_x86_cmp_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b);
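
Note (editor's sketch): as on x64, the 16-bit x86 store is just the 32-bit opcode behind the 0x66 prefix. For register 1 (ecx) stored at [ebx+4]:

    asm_x86_mov_r8_to_disp(as, 1, 3, 4);   // 88 4b 04      mov byte [ebx+4], cl
    asm_x86_mov_r16_to_disp(as, 1, 3, 4);  // 66 89 4b 04   mov word [ebx+4], cx
    asm_x86_mov_r32_to_disp(as, 1, 3, 4);  // 89 4b 04      mov dword [ebx+4], ecx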