Macro Cleaning
@@ -21,8 +21,7 @@
 #define MAX_INSTRUCTION_COUNT 50

-static struct
-{
+static struct {
     uint32_t pc;
     int op_ssegs;
     x86seg *op_ea_seg;
@@ -8,8 +8,7 @@
 #include "codegen_accumulate.h"
 #include "codegen_ir.h"

-static struct
-{
+static struct {
     int count;
     int dest_reg;
 } acc_regs[] = {
@@ -16,28 +16,29 @@
 #include "codegen_ops_3dnow.h"
 #include "codegen_ops_helpers.h"

-#define ropParith(func) \
-    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
-    { \
-        int dest_reg = (fetchdat >> 3) & 7; \
-        \
-        uop_MMX_ENTER(ir); \
-        codegen_mark_code_present(block, cs + op_pc, 1); \
-        if ((fetchdat & 0xc0) == 0xc0) { \
-            int src_reg = fetchdat & 7; \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
-        } else { \
-            x86seg *target_seg; \
-            \
-            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
-            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
-            codegen_check_seg_read(block, ir, target_seg); \
-            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
-        } \
-        \
-        codegen_mark_code_present(block, cs + op_pc + 1, 1); \
-        return op_pc + 2; \
+#define ropParith(func) \
+    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
+                       uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
+    { \
+        int dest_reg = (fetchdat >> 3) & 7; \
+        \
+        uop_MMX_ENTER(ir); \
+        codegen_mark_code_present(block, cs + op_pc, 1); \
+        if ((fetchdat & 0xc0) == 0xc0) { \
+            int src_reg = fetchdat & 7; \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
+        } else { \
+            x86seg *target_seg; \
+            \
+            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
+            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
+            codegen_check_seg_read(block, ir, target_seg); \
+            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
+        } \
+        \
+        codegen_mark_code_present(block, cs + op_pc + 1, 1); \
+        return op_pc + 2; \
 }

 // clang-format off
@@ -815,41 +815,56 @@ ropJNLE_common(codeblock_t *block, ir_data_t *ir, uint32_t dest_addr, uint32_t n
|
||||
}
|
||||
}
|
||||
|
||||
#define ropJ(cond) \
|
||||
uint32_t ropJ##cond##_8(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
uint32_t offset = (int32_t) (int8_t) fastreadb(cs + op_pc); \
|
||||
uint32_t dest_addr = op_pc + 1 + offset; \
|
||||
int ret; \
|
||||
\
|
||||
if (!(op_32 & 0x100)) \
|
||||
dest_addr &= 0xffff; \
|
||||
ret = ropJ##cond##_common(block, ir, dest_addr, op_pc + 1); \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 1); \
|
||||
return ret ? dest_addr : (op_pc + 1); \
|
||||
} \
|
||||
uint32_t ropJ##cond##_16(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
uint32_t offset = (int32_t) (int16_t) fastreadw(cs + op_pc); \
|
||||
uint32_t dest_addr = (op_pc + 2 + offset) & 0xffff; \
|
||||
int ret; \
|
||||
\
|
||||
ret = ropJ##cond##_common(block, ir, dest_addr, op_pc + 2); \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 2); \
|
||||
return ret ? dest_addr : (op_pc + 2); \
|
||||
} \
|
||||
uint32_t ropJ##cond##_32(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
uint32_t offset = fastreadl(cs + op_pc); \
|
||||
uint32_t dest_addr = op_pc + 4 + offset; \
|
||||
int ret; \
|
||||
\
|
||||
ret = ropJ##cond##_common(block, ir, dest_addr, op_pc + 4); \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 4); \
|
||||
return ret ? dest_addr : (op_pc + 4); \
|
||||
#define ropJ(cond) \
|
||||
uint32_t ropJ##cond##_8(codeblock_t *block, \
|
||||
ir_data_t *ir, \
|
||||
UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), \
|
||||
uint32_t op_32, \
|
||||
uint32_t op_pc) \
|
||||
{ \
|
||||
uint32_t offset = (int32_t) (int8_t) fastreadb(cs + op_pc); \
|
||||
uint32_t dest_addr = op_pc + 1 + offset; \
|
||||
int ret; \
|
||||
\
|
||||
if (!(op_32 & 0x100)) \
|
||||
dest_addr &= 0xffff; \
|
||||
ret = ropJ##cond##_common(block, ir, dest_addr, op_pc + 1); \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 1); \
|
||||
return ret ? dest_addr : (op_pc + 1); \
|
||||
} \
|
||||
uint32_t ropJ##cond##_16(codeblock_t *block, \
|
||||
ir_data_t *ir, \
|
||||
UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), \
|
||||
UNUSED(uint32_t op_32), \
|
||||
uint32_t op_pc) \
|
||||
{ \
|
||||
uint32_t offset = (int32_t) (int16_t) fastreadw(cs + op_pc); \
|
||||
uint32_t dest_addr = (op_pc + 2 + offset) & 0xffff; \
|
||||
int ret; \
|
||||
\
|
||||
ret = ropJ##cond##_common(block, ir, dest_addr, op_pc + 2); \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 2); \
|
||||
return ret ? dest_addr : (op_pc + 2); \
|
||||
} \
|
||||
uint32_t ropJ##cond##_32(codeblock_t *block, \
|
||||
ir_data_t *ir, \
|
||||
UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), \
|
||||
UNUSED(uint32_t op_32), \
|
||||
uint32_t op_pc) \
|
||||
{ \
|
||||
uint32_t offset = fastreadl(cs + op_pc); \
|
||||
uint32_t dest_addr = op_pc + 4 + offset; \
|
||||
int ret; \
|
||||
\
|
||||
ret = ropJ##cond##_common(block, ir, dest_addr, op_pc + 4); \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 4); \
|
||||
return ret ? dest_addr : (op_pc + 4); \
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
|
||||
@@ -300,131 +300,139 @@ ropFUCOMPP(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uin
|
||||
return op_pc;
|
||||
}
|
||||
|
||||
#define ropF_arith_mem(name, load_uop) \
|
||||
uint32_t ropFADD##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
if ((cpu_state.npxc >> 10) & 3) \
|
||||
return 0; \
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FADD(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFCOM##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFCOMP##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
fpu_POP(block, ir); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFDIV##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFDIVR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFMUL##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FMUL(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFSUB##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFSUBR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
#define ropF_arith_mem(name, load_uop) \
|
||||
uint32_t ropFADD##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
if ((cpu_state.npxc >> 10) & 3) \
|
||||
return 0; \
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FADD(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFCOM##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFCOMP##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
fpu_POP(block, ir); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFDIV##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFDIVR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFMUL##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FMUL(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFSUB##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFSUBR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
load_uop(ir, IREG_temp0_D, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
@@ -432,137 +440,145 @@ ropF_arith_mem(s, uop_MEM_LOAD_SINGLE)
|
||||
ropF_arith_mem(d, uop_MEM_LOAD_DOUBLE)
|
||||
// clang-format on
|
||||
|
||||
#define ropFI_arith_mem(name, temp_reg) \
|
||||
uint32_t ropFIADD##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FADD(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFICOM##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFICOMP##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
fpu_POP(block, ir); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFIDIV##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFIDIVR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFIMUL##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FMUL(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFISUB##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFISUBR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
#define ropFI_arith_mem(name, temp_reg) \
|
||||
uint32_t ropFIADD##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FADD(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFICOM##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFICOMP##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FCOM(ir, IREG_temp1_W, IREG_ST(0), IREG_temp0_D); \
|
||||
uop_AND_IMM(ir, IREG_NPXS, IREG_NPXS, ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3)); \
|
||||
uop_OR(ir, IREG_NPXS, IREG_NPXS, IREG_temp1_W); \
|
||||
fpu_POP(block, ir); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFIDIV##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFIDIVR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FDIV(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFIMUL##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FMUL(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFISUB##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_ST(0), IREG_temp0_D); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t ropFISUBR##name(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg; \
|
||||
\
|
||||
uop_FP_ENTER(ir); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
op_pc--; \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, temp_reg, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MOV_DOUBLE_INT(ir, IREG_temp0_D, temp_reg); \
|
||||
uop_FSUB(ir, IREG_ST(0), IREG_temp0_D, IREG_ST(0)); \
|
||||
uop_MOV_IMM(ir, IREG_tag(0), TAG_VALID); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
|
||||
@@ -519,57 +519,61 @@ ropCWDE(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSE
|
||||
return op_pc;
|
||||
}
|
||||
|
||||
#define ropLxS(name, seg) \
|
||||
uint32_t rop##name##_16(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg = NULL; \
|
||||
int dest_reg = (fetchdat >> 3) & 7; \
|
||||
\
|
||||
if ((fetchdat & 0xc0) == 0xc0) \
|
||||
return 0; \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 1); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 2); \
|
||||
uop_LOAD_SEG(ir, seg, IREG_temp1_W); \
|
||||
uop_MOV(ir, IREG_16(dest_reg), IREG_temp0_W); \
|
||||
\
|
||||
if (seg == &cpu_state.seg_ss) \
|
||||
CPU_BLOCK_END(); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t rop##name##_32(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg = NULL; \
|
||||
int dest_reg = (fetchdat >> 3) & 7; \
|
||||
\
|
||||
if ((fetchdat & 0xc0) == 0xc0) \
|
||||
return 0; \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 1); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 4); \
|
||||
uop_LOAD_SEG(ir, seg, IREG_temp1_W); \
|
||||
uop_MOV(ir, IREG_32(dest_reg), IREG_temp0); \
|
||||
\
|
||||
if (seg == &cpu_state.seg_ss) \
|
||||
CPU_BLOCK_END(); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
#define ropLxS(name, seg) \
|
||||
uint32_t rop##name##_16(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg = NULL; \
|
||||
int dest_reg = (fetchdat >> 3) & 7; \
|
||||
\
|
||||
if ((fetchdat & 0xc0) == 0xc0) \
|
||||
return 0; \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 1); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 2); \
|
||||
uop_LOAD_SEG(ir, seg, IREG_temp1_W); \
|
||||
uop_MOV(ir, IREG_16(dest_reg), IREG_temp0_W); \
|
||||
\
|
||||
if (seg == &cpu_state.seg_ss) \
|
||||
CPU_BLOCK_END(); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
} \
|
||||
uint32_t rop##name##_32(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
|
||||
{ \
|
||||
x86seg *target_seg = NULL; \
|
||||
int dest_reg = (fetchdat >> 3) & 7; \
|
||||
\
|
||||
if ((fetchdat & 0xc0) == 0xc0) \
|
||||
return 0; \
|
||||
\
|
||||
codegen_mark_code_present(block, cs + op_pc, 1); \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
|
||||
codegen_check_seg_read(block, ir, target_seg); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0, ireg_seg_base(target_seg), IREG_eaaddr); \
|
||||
uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 4); \
|
||||
uop_LOAD_SEG(ir, seg, IREG_temp1_W); \
|
||||
uop_MOV(ir, IREG_32(dest_reg), IREG_temp0); \
|
||||
\
|
||||
if (seg == &cpu_state.seg_ss) \
|
||||
CPU_BLOCK_END(); \
|
||||
\
|
||||
return op_pc + 1; \
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
ropLxS(LDS, &cpu_state.seg_ds)
|
||||
ropLxS(LES, &cpu_state.seg_es)
|
||||
ropLxS(LFS, &cpu_state.seg_fs)
|
||||
ropLxS(LGS, &cpu_state.seg_gs)
|
||||
ropLxS(LSS, &cpu_state.seg_ss)
|
||||
// clang-format on
|
||||
|
||||
uint32_t
|
||||
ropCLC(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc)
|
||||
|
||||
@@ -16,27 +16,28 @@
 #include "codegen_ops_mmx_arith.h"
 #include "codegen_ops_helpers.h"

-#define ropParith(func) \
-    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
-    { \
-        int dest_reg = (fetchdat >> 3) & 7; \
-        \
-        uop_MMX_ENTER(ir); \
-        codegen_mark_code_present(block, cs + op_pc, 1); \
-        if ((fetchdat & 0xc0) == 0xc0) { \
-            int src_reg = fetchdat & 7; \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
-        } else { \
-            x86seg *target_seg; \
-            \
-            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
-            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
-            codegen_check_seg_read(block, ir, target_seg); \
-            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
-        } \
-        \
-        return op_pc + 1; \
+#define ropParith(func) \
+    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
+                       uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
+    { \
+        int dest_reg = (fetchdat >> 3) & 7; \
+        \
+        uop_MMX_ENTER(ir); \
+        codegen_mark_code_present(block, cs + op_pc, 1); \
+        if ((fetchdat & 0xc0) == 0xc0) { \
+            int src_reg = fetchdat & 7; \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
+        } else { \
+            x86seg *target_seg; \
+            \
+            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
+            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
+            codegen_check_seg_read(block, ir, target_seg); \
+            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
+        } \
+        \
+        return op_pc + 1; \
 }

 // clang-format off
@@ -16,27 +16,28 @@
 #include "codegen_ops_mmx_cmp.h"
 #include "codegen_ops_helpers.h"

-#define ropPcmp(func) \
-    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
-    { \
-        int dest_reg = (fetchdat >> 3) & 7; \
-        \
-        uop_MMX_ENTER(ir); \
-        codegen_mark_code_present(block, cs + op_pc, 1); \
-        if ((fetchdat & 0xc0) == 0xc0) { \
-            int src_reg = fetchdat & 7; \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
-        } else { \
-            x86seg *target_seg; \
-            \
-            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
-            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
-            codegen_check_seg_read(block, ir, target_seg); \
-            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
-        } \
-        \
-        return op_pc + 1; \
+#define ropPcmp(func) \
+    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
+                       uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
+    { \
+        int dest_reg = (fetchdat >> 3) & 7; \
+        \
+        uop_MMX_ENTER(ir); \
+        codegen_mark_code_present(block, cs + op_pc, 1); \
+        if ((fetchdat & 0xc0) == 0xc0) { \
+            int src_reg = fetchdat & 7; \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
+        } else { \
+            x86seg *target_seg; \
+            \
+            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
+            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
+            codegen_check_seg_read(block, ir, target_seg); \
+            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
+        } \
+        \
+        return op_pc + 1; \
 }

 // clang-format off
@@ -16,27 +16,28 @@
 #include "codegen_ops_mmx_pack.h"
 #include "codegen_ops_helpers.h"

-#define ropPpack(func) \
-    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
-    { \
-        int dest_reg = (fetchdat >> 3) & 7; \
-        \
-        uop_MMX_ENTER(ir); \
-        codegen_mark_code_present(block, cs + op_pc, 1); \
-        if ((fetchdat & 0xc0) == 0xc0) { \
-            int src_reg = fetchdat & 7; \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
-        } else { \
-            x86seg *target_seg; \
-            \
-            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
-            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
-            codegen_check_seg_read(block, ir, target_seg); \
-            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
-            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
-        } \
-        \
-        return op_pc + 1; \
+#define ropPpack(func) \
+    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), \
+                       uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
+    { \
+        int dest_reg = (fetchdat >> 3) & 7; \
+        \
+        uop_MMX_ENTER(ir); \
+        codegen_mark_code_present(block, cs + op_pc, 1); \
+        if ((fetchdat & 0xc0) == 0xc0) { \
+            int src_reg = fetchdat & 7; \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg)); \
+        } else { \
+            x86seg *target_seg; \
+            \
+            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
+            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0); \
+            codegen_check_seg_read(block, ir, target_seg); \
+            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr); \
+            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q); \
+        } \
+        \
+        return op_pc + 1; \
 }

 // clang-format off
@@ -196,61 +196,65 @@ ropPOP_L(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fet
|
||||
return op_pc + 1;
|
||||
}
|
||||
|
||||
#define ROP_PUSH_SEG(seg) \
|
||||
uint32_t ropPUSH_##seg##_16(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
int sp_reg; \
|
||||
\
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
sp_reg = LOAD_SP_WITH_OFFSET(ir, -2); \
|
||||
uop_MEM_STORE_REG(ir, IREG_SS_base, sp_reg, IREG_##seg##_seg_W); \
|
||||
SUB_SP(ir, 2); \
|
||||
\
|
||||
return op_pc; \
|
||||
} \
|
||||
uint32_t ropPUSH_##seg##_32(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
int sp_reg; \
|
||||
\
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
sp_reg = LOAD_SP_WITH_OFFSET(ir, -4); \
|
||||
uop_MOVZX(ir, IREG_temp0, IREG_##seg##_seg_W); \
|
||||
uop_MEM_STORE_REG(ir, IREG_SS_base, sp_reg, IREG_temp0); \
|
||||
SUB_SP(ir, 4); \
|
||||
\
|
||||
return op_pc; \
|
||||
#define ROP_PUSH_SEG(seg) \
|
||||
uint32_t ropPUSH_##seg##_16(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
int sp_reg; \
|
||||
\
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
sp_reg = LOAD_SP_WITH_OFFSET(ir, -2); \
|
||||
uop_MEM_STORE_REG(ir, IREG_SS_base, sp_reg, IREG_##seg##_seg_W); \
|
||||
SUB_SP(ir, 2); \
|
||||
\
|
||||
return op_pc; \
|
||||
} \
|
||||
uint32_t ropPUSH_##seg##_32(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
int sp_reg; \
|
||||
\
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
sp_reg = LOAD_SP_WITH_OFFSET(ir, -4); \
|
||||
uop_MOVZX(ir, IREG_temp0, IREG_##seg##_seg_W); \
|
||||
uop_MEM_STORE_REG(ir, IREG_SS_base, sp_reg, IREG_temp0); \
|
||||
SUB_SP(ir, 4); \
|
||||
\
|
||||
return op_pc; \
|
||||
}
|
||||
|
||||
#define ROP_POP_SEG(seg, rseg) \
|
||||
uint32_t ropPOP_##seg##_16(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
\
|
||||
if (stack32) \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_ESP); \
|
||||
else { \
|
||||
uop_MOVZX(ir, IREG_eaaddr, IREG_SP); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_eaaddr); \
|
||||
} \
|
||||
uop_LOAD_SEG(ir, &rseg, IREG_temp0_W); \
|
||||
ADD_SP(ir, 2); \
|
||||
\
|
||||
return op_pc; \
|
||||
} \
|
||||
uint32_t ropPOP_##seg##_32(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
\
|
||||
if (stack32) \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_ESP); \
|
||||
else { \
|
||||
uop_MOVZX(ir, IREG_eaaddr, IREG_SP); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_eaaddr); \
|
||||
} \
|
||||
uop_LOAD_SEG(ir, &rseg, IREG_temp0_W); \
|
||||
ADD_SP(ir, 4); \
|
||||
\
|
||||
return op_pc; \
|
||||
#define ROP_POP_SEG(seg, rseg) \
|
||||
uint32_t ropPOP_##seg##_16(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
\
|
||||
if (stack32) \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_ESP); \
|
||||
else { \
|
||||
uop_MOVZX(ir, IREG_eaaddr, IREG_SP); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_eaaddr); \
|
||||
} \
|
||||
uop_LOAD_SEG(ir, &rseg, IREG_temp0_W); \
|
||||
ADD_SP(ir, 2); \
|
||||
\
|
||||
return op_pc; \
|
||||
} \
|
||||
uint32_t ropPOP_##seg##_32(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), \
|
||||
UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc) \
|
||||
{ \
|
||||
uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc); \
|
||||
\
|
||||
if (stack32) \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_ESP); \
|
||||
else { \
|
||||
uop_MOVZX(ir, IREG_eaaddr, IREG_SP); \
|
||||
uop_MEM_LOAD_REG(ir, IREG_temp0_W, IREG_SS_base, IREG_eaaddr); \
|
||||
} \
|
||||
uop_LOAD_SEG(ir, &rseg, IREG_temp0_W); \
|
||||
ADD_SP(ir, 4); \
|
||||
\
|
||||
return op_pc; \
|
||||
}
|
||||
|
||||
// clang-format off