Rewrote the recompiler's interrupt checking in assembly (and removed it from the new dynamic recompiler, where the required uops are not present), which brings performance back up; also did a number of CPU-related clean-ups (mostly removal of dead variables and associated code).

This commit is contained in:
OBattler
2020-07-13 19:46:19 +02:00
parent 0cd0d83cee
commit a862bda04c
32 changed files with 196 additions and 291 deletions
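For context before the diffs: in the new dynamic recompiler, the interrupt check was an ordinary helper invoked through a CALL uop after every guest instruction, which is exactly the per-instruction overhead this commit eliminates. Below is a minimal sketch of that pattern, not 86Box code: the pending-IRQ flag pic_intpending and the simplified uop signature are assumptions; only uop_CALL_INSTRUCTION_FUNC, int_check, and cpu_block_end appear in the diff itself.

typedef struct ir_data_t ir_data_t;      /* opaque IR handle, as in the codegen */

extern int pic_intpending;               /* assumption: a pending-interrupt flag */
extern int cpu_block_end;                /* see the codegen.h hunk below */

void uop_CALL_INSTRUCTION_FUNC(ir_data_t *ir, void (*func)(void)); /* signature simplified */

/* Hypothetical body of the helper the removed uop called. */
static void int_check(void)
{
    if (pic_intpending)
        cpu_block_end = 1;   /* end the block so the interrupt can be serviced */
}

/* Per-instruction emission as it read before this commit. */
static void gen_one_instruction(ir_data_t *ir)
{
    /* ... uops for the instruction itself ... */
    uop_CALL_INSTRUCTION_FUNC(ir, int_check);   /* removed by the hunks below */
}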

View File

@@ -599,7 +599,6 @@ generate_call:
 codegen_timing_opcode(opcode, fetchdat, op_32, op_pc);
-codegen_accumulate(ir, ACCREG_ins, 1);
 codegen_accumulate(ir, ACCREG_cycles, -codegen_block_cycles);
 codegen_block_cycles = 0;
@@ -696,9 +695,6 @@ generate_call:
 block->ins++;
-/* Check for interrupts. */
-uop_CALL_INSTRUCTION_FUNC(ir, int_check);
 if (block->ins >= MAX_INSTRUCTION_COUNT)
 CPU_BLOCK_END();
@@ -759,8 +755,6 @@ generate_call:
 uop_MOV_IMM(ir, IREG_ssegs, op_ssegs);
 uop_LOAD_FUNC_ARG_IMM(ir, 0, fetchdat);
 uop_CALL_INSTRUCTION_FUNC(ir, op);
-/* Check for interrupts. */
-uop_CALL_INSTRUCTION_FUNC(ir, int_check);
 codegen_mark_code_present(block, cs+cpu_state.pc, 8);
 last_op_32 = op_32;

View File

@@ -341,15 +341,8 @@ void codegen_delete_random_block(int required_mem_block);
 extern int cpu_block_end;
 extern uint32_t codegen_endpc;
 extern int cpu_recomp_blocks, cpu_recomp_full_ins, cpu_new_blocks;
-extern int cpu_recomp_blocks_latched, cpu_recomp_ins_latched, cpu_recomp_full_ins_latched, cpu_new_blocks_latched;
-extern int cpu_recomp_flushes, cpu_recomp_flushes_latched;
-extern int cpu_recomp_evicted, cpu_recomp_evicted_latched;
-extern int cpu_recomp_reuse, cpu_recomp_reuse_latched;
-extern int cpu_recomp_removed, cpu_recomp_removed_latched;
-extern int cpu_reps, cpu_reps_latched;
-extern int cpu_notreps, cpu_notreps_latched;
+extern int cpu_reps;
+extern int cpu_notreps;
 extern int codegen_block_cycles;

View File

@@ -13,8 +13,7 @@ static struct
 int dest_reg;
 } acc_regs[] =
 {
-[ACCREG_ins] = {0, IREG_ins},
-[ACCREG_cycles] = {0, IREG_cycles},
+[ACCREG_cycles] = {0, IREG_cycles}
 };

 void codegen_accumulate(ir_data_t *ir, int acc_reg, int delta)
@@ -28,23 +27,14 @@ void codegen_accumulate(ir_data_t *ir, int acc_reg, int delta)
 void codegen_accumulate_flush(ir_data_t *ir)
 {
-int c;
-for (c = 0; c < ACCREG_COUNT; c++)
-{
-if (acc_regs[c].count)
-{
-uop_ADD_IMM(ir, acc_regs[c].dest_reg, acc_regs[c].dest_reg, acc_regs[c].count);
-}
+if (acc_regs[0].count) {
+uop_ADD_IMM(ir, acc_regs[0].dest_reg, acc_regs[0].dest_reg, acc_regs[0].count);
+}
-acc_regs[c].count = 0;
-}
+acc_regs[0].count = 0;
 }

 void codegen_accumulate_reset()
 {
-int c;
-for (c = 0; c < ACCREG_COUNT; c++)
-acc_regs[c].count = 0;
+acc_regs[0].count = 0;
 }
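Read together, the two hunks above leave codegen_accumulate.c tracking only the cycle counter: with ACCREG_ins gone, the loops over ACCREG_COUNT collapse into direct accesses of slot 0. Reassembled from the added lines, the functions now read:

void codegen_accumulate_flush(ir_data_t *ir)
{
    /* Only the cycle accumulator (slot 0) remains. */
    if (acc_regs[0].count) {
        uop_ADD_IMM(ir, acc_regs[0].dest_reg, acc_regs[0].dest_reg, acc_regs[0].count);
    }
    acc_regs[0].count = 0;
}

void codegen_accumulate_reset()
{
    acc_regs[0].count = 0;
}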

View File

@@ -37,11 +37,6 @@ int block_current = 0;
 static int block_num;
 int block_pos;
-int cpu_recomp_flushes, cpu_recomp_flushes_latched;
-int cpu_recomp_evicted, cpu_recomp_evicted_latched;
-int cpu_recomp_reuse, cpu_recomp_reuse_latched;
-int cpu_recomp_removed, cpu_recomp_removed_latched;
 uint32_t codegen_endpc;
 int codegen_block_cycles;
@@ -479,7 +474,6 @@ void codegen_check_flush(page_t *page, uint64_t mask, uint32_t phys_addr)
 if (*block->dirty_mask & block->page_mask)
 {
 invalidate_block(block);
-cpu_recomp_evicted++;
 }
 if (block_nr == next_block)
 fatal("Broken 1\n");
@@ -496,7 +490,6 @@ void codegen_check_flush(page_t *page, uint64_t mask, uint32_t phys_addr)
 if (*block->dirty_mask2 & block->page_mask2)
 {
 invalidate_block(block);
-cpu_recomp_evicted++;
 }
 if (block_nr == next_block)
 fatal("Broken 2\n");
@@ -626,7 +619,6 @@ void codegen_block_remove()
 codeblock_t *block = &codeblock[block_current];
 delete_block(block);
-cpu_recomp_removed++;
 recomp_page = -1;

View File

@@ -95,7 +95,7 @@ struct
 [IREG_rm_mod_reg] = {REG_DWORD, &cpu_state.rm_data.rm_mod_reg_data, REG_INTEGER, REG_PERMANENT},
-[IREG_ins] = {REG_DWORD, &cpu_state.cpu_recomp_ins, REG_INTEGER, REG_PERMANENT},
+[IREG_acycs] = {REG_DWORD, &acycs, REG_INTEGER, REG_PERMANENT},
 [IREG_cycles] = {REG_DWORD, &cpu_state._cycles, REG_INTEGER, REG_PERMANENT},
 [IREG_CS_base] = {REG_DWORD, &cpu_state.seg_cs.base, REG_INTEGER, REG_PERMANENT},
@@ -180,8 +180,6 @@ struct
 [IREG_temp0d] = {REG_DOUBLE, (void *)40, REG_FP, REG_VOLATILE},
 [IREG_temp1d] = {REG_DOUBLE, (void *)48, REG_FP, REG_VOLATILE},
-[IREG_acycs] = {REG_DWORD, &acycs, REG_INTEGER, REG_PERMANENT}
 };

 void codegen_reg_mark_as_required()

View File

@@ -41,7 +41,7 @@ enum
 IREG_rm_mod_reg = 18,
-IREG_ins = 19,
+IREG_acycs = 19,
 IREG_cycles = 20,
 IREG_CS_base = 21,
@@ -133,9 +133,7 @@ enum
 IREG_GS_limit_high = 86,
 IREG_SS_limit_high = 87,
-IREG_acycs = 88,
-IREG_COUNT = 89,
+IREG_COUNT = 88,
 IREG_INVALID = 255,
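The enum and the tables in codegen_reg.c must change in lockstep because the IREG_* values double as indices into the designated initializers: IREG_acycs takes over slot 19 vacated by IREG_ins, its old entry at the end of the array goes away, and IREG_COUNT drops from 89 to 88. A hypothetical compile-time guard for that invariant, not part of this commit (C11 static_assert):

#include <assert.h>

/* Hypothetical: would catch the enum and the codegen_reg.c initializers
   drifting out of sync. */
static_assert(IREG_acycs == 19, "IREG_acycs must occupy the slot IREG_ins vacated");
static_assert(IREG_COUNT == 88, "IREG_COUNT must equal the number of defined IREGs");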

View File

@@ -908,11 +908,9 @@ void loadcscall(uint16_t seg, uint32_t old_pc)
 int type;
 uint16_t tempw;
-int csout = output;
 if (msw&1 && !(cpu_state.eflags&VM_FLAG))
 {
-if (csout) x86seg_log("Protected mode CS load! %04X\n",seg);
+x86seg_log("Protected mode CS load! %04X\n", seg);
 if (!(seg&~3))
 {
 x86gpf(NULL,0);
@@ -946,7 +944,7 @@ void loadcscall(uint16_t seg, uint32_t old_pc)
 newpc=segdat[0];
 if (type&0x800) newpc|=segdat[3]<<16;
-if (csout) x86seg_log("Code seg call - %04X - %04X %04X %04X\n",seg,segdat[0],segdat[1],segdat[2]);
+x86seg_log("Code seg call - %04X - %04X %04X %04X\n",seg,segdat[0],segdat[1],segdat[2]);
 if (segdat[2]&0x1000)
 {
 if (!(segdat[2]&0x400)) /*Not conforming*/
@@ -992,13 +990,15 @@ void loadcscall(uint16_t seg, uint32_t old_pc)
 do_seg_load(&cpu_state.seg_cs, segdat);
 if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
 oldcpl = CPL;
-if (csout) x86seg_log("Complete\n");
+#ifdef ENABLE_X86SEG_LOG
+x86seg_log("Complete\n");
+#endif
 cycles -= timing_call_pm;
 }
 else
 {
 type=segdat[2]&0xF00;
-if (csout) x86seg_log("Type %03X\n",type);
+x86seg_log("Type %03X\n",type);
 switch (type)
 {
 case 0x400: /*Call gate*/
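The dropped csout gating is redundant because x86seg_log is already compile-time controlled: 86Box's per-file loggers are typically defined along these lines (a sketch; the exact definition in x86seg.c may differ), so in builds without ENABLE_X86SEG_LOG the calls compile to nothing and need no runtime guard:

#include <stdarg.h>

#ifdef ENABLE_X86SEG_LOG
int x86seg_do_log = ENABLE_X86SEG_LOG;

static void
x86seg_log(const char *fmt, ...)
{
    va_list ap;

    if (x86seg_do_log) {
        va_start(ap, fmt);
        pclog_ex(fmt, ap);   /* the common 86Box logging back end */
        va_end(ap);
    }
}
#else
#define x86seg_log(fmt, ...)
#endif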