diff --git a/src/cpu/386.c b/src/cpu/386.c index 295f94056..b5a1e61e3 100644 --- a/src/cpu/386.c +++ b/src/cpu/386.c @@ -240,6 +240,7 @@ exec386_2386(int32_t cycs) cycdiff = 0; oldcyc = cycles; while (cycdiff < cycle_period) { + int ins_fetch_fault = 0; ins_cycles = cycles; #ifndef USE_NEW_DYNAREC @@ -259,6 +260,14 @@ exec386_2386(int32_t cycs) fetchdat = fastreadl_fetch(cs + cpu_state.pc); ol = opcode_length[fetchdat & 0xff]; CHECK_READ_CS(MIN(ol, 4)); + ins_fetch_fault = cpu_386_check_instruction_fault(); + + if (!cpu_state.abrt && ins_fetch_fault) { + x86gen(); + ins_fetch_fault = 0; + /* No instructions executed at this point. */ + goto block_ended; + } if (!cpu_state.abrt) { #ifdef ENABLE_386_LOG @@ -267,7 +276,7 @@ exec386_2386(int32_t cycs) #endif opcode = fetchdat & 0xFF; fetchdat >>= 8; - trap = cpu_state.flags & T_FLAG; + trap |= !!(cpu_state.flags & T_FLAG); cpu_state.pc++; x86_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat); @@ -287,6 +296,7 @@ exec386_2386(int32_t cycs) if (cpu_end_block_after_ins) cpu_end_block_after_ins--; +block_ended: if (cpu_state.abrt) { flags_rebuild(); tempi = cpu_state.abrt & ABRT_MASK; @@ -309,9 +319,12 @@ exec386_2386(int32_t cycs) #endif } } + if (!x86_was_reset && ins_fetch_fault) + x86gen(); /* This is supposed to be the first one serviced by the processor according to the manual. */ } else if (trap) { flags_rebuild(); - dr[6] |= (trap == 2) ? 0x8000 : 0x4000; + if (trap & 2) dr[6] |= 0x8000; + if (trap & 1) dr[6] |= 0x4000; trap = 0; #ifndef USE_NEW_DYNAREC oldcs = CS; diff --git a/src/cpu/386_common.c b/src/cpu/386_common.c index 60ecd8954..4b4037207 100644 --- a/src/cpu/386_common.c +++ b/src/cpu/386_common.c @@ -80,6 +80,7 @@ int smm_in_hlt = 0; int smi_block = 0; int prefetch_prefixes = 0; +int rf_flag_no_clear = 0; int tempc; int oldcpl; @@ -1491,7 +1492,7 @@ x86_int_sw(int num) } } - trap = 0; + trap &= ~1; CPU_BLOCK_END(); } @@ -1534,7 +1535,7 @@ x86_int_sw_rm(int num) #endif cycles -= timing_int_rm; - trap = 0; + trap &= ~1; CPU_BLOCK_END(); return 0; @@ -1655,6 +1656,37 @@ cpu_386_flags_rebuild(void) flags_rebuild(); } +extern uint64_t mmutranslate_noabrt_2386(uint32_t addr, int rw); +int +cpu_386_check_instruction_fault(void) +{ + int i = 0; + int fault = 0; + /* Report no fault if RF is set. */ + if (cpu_state.eflags & RF_FLAG) + return 0; + + /* Make sure breakpoints are enabled. */ + if (!(dr[7] & 0xFF)) + return 0; + + for (i = 0; i < 4; i++) { + int breakpoint_enabled = !!(dr[7] & (0x3 << (2 * i))) && !(dr[7] & (0x30000 << (4 * i))); + uint32_t translated_addr = 0xffffffff; + if (!breakpoint_enabled) + continue; + + translated_addr = dr[i]; + + if ((cs + cpu_state.pc) == (uint32_t)translated_addr) { + dr[6] |= (1 << i); + fault = 1; + } + } + + return fault; +} + int sysenter(uint32_t fetchdat) { diff --git a/src/cpu/386_common.h b/src/cpu/386_common.h index 22fbd4bff..35d4f7cc8 100644 --- a/src/cpu/386_common.h +++ b/src/cpu/386_common.h @@ -674,3 +674,8 @@ seteaq(uint64_t v) cpu_state.pc += 2 #endif + +/* Resume Flag handling. 
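+   The CPU sets RF to suppress instruction breakpoints for one instruction, so
+   that a debug handler can return to the faulting instruction without it
+   immediately re-faulting.  Instructions that reload EFLAGS wholesale
+   (POPF/IRET/task switch) set rf_flag_no_clear so the value they establish
+   survives the end-of-instruction clearing of RF.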
*/ +extern int rf_flag_no_clear; + +int cpu_386_check_instruction_fault(void); \ No newline at end of file diff --git a/src/cpu/386_ops.h b/src/cpu/386_ops.h index 8a0f4cd5a..710031ef1 100644 --- a/src/cpu/386_ops.h +++ b/src/cpu/386_ops.h @@ -181,7 +181,11 @@ extern void x386_dynarec_log(const char *fmt, ...); #ifndef OPS_286_386 # include "x86_ops_cyrix.h" #endif -#include "x86_ops_flag.h" +#ifdef OPS_286_386 +# include "x86_ops_flag_2386.h" +#else +# include "x86_ops_flag.h" +#endif #include "x86_ops_fpu.h" #include "x86_ops_inc_dec.h" #include "x86_ops_int.h" @@ -200,7 +204,11 @@ extern void x386_dynarec_log(const char *fmt, ...); # include "x86_ops_mmx_shift.h" #endif #include "x86_ops_mov.h" -#include "x86_ops_mov_ctrl.h" +#ifdef OPS_286_386 +# include "x86_ops_mov_ctrl_2386.h" +#else +# include "x86_ops_mov_ctrl.h" +#endif #include "x86_ops_mov_seg.h" #include "x86_ops_movx.h" #ifndef OPS_286_386 @@ -218,7 +226,11 @@ extern void x386_dynarec_log(const char *fmt, ...); # include "x86_ops_rep.h" # endif #endif -#include "x86_ops_ret.h" +#ifdef OPS_286_386 +# include "x86_ops_ret_2386.h" +#else +# include "x86_ops_ret.h" +#endif #include "x86_ops_set.h" #include "x86_ops_stack.h" #ifdef OPS_286_386 diff --git a/src/cpu/x86_ops_flag_2386.h b/src/cpu/x86_ops_flag_2386.h new file mode 100644 index 000000000..ba34ae5e7 --- /dev/null +++ b/src/cpu/x86_ops_flag_2386.h @@ -0,0 +1,323 @@ +static int +opCMC(uint32_t fetchdat) +{ + flags_rebuild(); + cpu_state.flags ^= C_FLAG; + CLOCK_CYCLES(2); + PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0); + return 0; +} + +static int +opCLC(uint32_t fetchdat) +{ + flags_rebuild(); + cpu_state.flags &= ~C_FLAG; + CLOCK_CYCLES(2); + PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0); + return 0; +} +static int +opCLD(uint32_t fetchdat) +{ + cpu_state.flags &= ~D_FLAG; + CLOCK_CYCLES(2); + PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0); + return 0; +} +static int +opCLI(uint32_t fetchdat) +{ + if (!IOPLp) { + if ((!(cpu_state.eflags & VM_FLAG) && (cr4 & CR4_PVI)) || ((cpu_state.eflags & VM_FLAG) && (cr4 & CR4_VME))) { + cpu_state.eflags &= ~VIF_FLAG; + } else { + x86gpf(NULL, 0); + return 1; + } + } else + cpu_state.flags &= ~I_FLAG; + + CLOCK_CYCLES(3); + PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0); + return 0; +} + +static int +opSTC(uint32_t fetchdat) +{ + flags_rebuild(); + cpu_state.flags |= C_FLAG; + CLOCK_CYCLES(2); + PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0); + return 0; +} +static int +opSTD(uint32_t fetchdat) +{ + cpu_state.flags |= D_FLAG; + CLOCK_CYCLES(2); + PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0); + return 0; +} +static int +opSTI(uint32_t fetchdat) +{ + if (!IOPLp) { + if ((!(cpu_state.eflags & VM_FLAG) && (cr4 & CR4_PVI)) || ((cpu_state.eflags & VM_FLAG) && (cr4 & CR4_VME))) { + if (cpu_state.eflags & VIP_FLAG) { + x86gpf(NULL, 0); + return 1; + } else + cpu_state.eflags |= VIF_FLAG; + } else { + x86gpf(NULL, 0); + return 1; + } + } else + cpu_state.flags |= I_FLAG; + + /*First instruction after STI will always execute, regardless of whether + there is a pending interrupt*/ + cpu_end_block_after_ins = 2; + + CLOCK_CYCLES(2); + PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0); + return 0; +} + +static int +opSAHF(uint32_t fetchdat) +{ + flags_rebuild(); + cpu_state.flags = (cpu_state.flags & 0xff00) | (AH & 0xd5) | 2; + CLOCK_CYCLES(3); + PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0); + +#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC)) + codegen_flags_changed = 0; +#endif + + return 0; +} +static int +opLAHF(uint32_t fetchdat) +{ + flags_rebuild(); + AH = cpu_state.flags & 0xff; + CLOCK_CYCLES(3); + 
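+    /* AH now holds SF/ZF/AF/PF/CF; bit 1 of FLAGS always reads as 1. */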
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0); + return 0; +} + +static int +opPUSHF(uint32_t fetchdat) +{ + if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) { + if (cr4 & CR4_VME) { + uint16_t temp; + + flags_rebuild(); + temp = (cpu_state.flags & ~I_FLAG) | 0x3000; + if (cpu_state.eflags & VIF_FLAG) + temp |= I_FLAG; + PUSH_W(temp); + } else { + x86gpf(NULL, 0); + return 1; + } + } else { + flags_rebuild(); + PUSH_W(cpu_state.flags); + } + CLOCK_CYCLES(4); + PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0); + return cpu_state.abrt; +} +static int +opPUSHFD(uint32_t fetchdat) +{ + uint16_t tempw; + if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) { + x86gpf(NULL, 0); + return 1; + } + if (cpu_CR4_mask & CR4_VME) + tempw = cpu_state.eflags & 0x3c; + else if (CPUID) + tempw = cpu_state.eflags & 0x24; + else + tempw = cpu_state.eflags & 4; + flags_rebuild(); + PUSH_L(cpu_state.flags | (tempw << 16)); + CLOCK_CYCLES(4); + PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 0); + return cpu_state.abrt; +} + +static int +opPOPF_186(uint32_t fetchdat) +{ + uint16_t tempw; + + if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) { + x86gpf(NULL, 0); + return 1; + } + + tempw = POP_W(); + if (cpu_state.abrt) + return 1; + + if (!(msw & 1)) + cpu_state.flags = (cpu_state.flags & 0x7000) | (tempw & 0x0fd5) | 2; + else if (!(CPL)) + cpu_state.flags = (tempw & 0x7fd5) | 2; + else if (IOPLp) + cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2; + else + cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2; + flags_extract(); + rf_flag_no_clear = 1; + + CLOCK_CYCLES(5); + PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0); + +#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC)) + codegen_flags_changed = 0; +#endif + + return 0; +} +static int +opPOPF_286(uint32_t fetchdat) +{ + uint16_t tempw; + + if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) { + x86gpf(NULL, 0); + return 1; + } + + tempw = POP_W(); + if (cpu_state.abrt) + return 1; + + if (!(msw & 1)) + cpu_state.flags = (cpu_state.flags & 0x7000) | (tempw & 0x0fd5) | 2; + else if (!(CPL)) + cpu_state.flags = (tempw & 0x7fd5) | 2; + else if (IOPLp) + cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2; + else + cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2; + flags_extract(); + rf_flag_no_clear = 1; + + CLOCK_CYCLES(5); + PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0); + +#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC)) + codegen_flags_changed = 0; +#endif + + return 0; +} +static int +opPOPF(uint32_t fetchdat) +{ + uint16_t tempw; + + if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) { + if (cr4 & CR4_VME) { + uint32_t old_esp = ESP; + + tempw = POP_W(); + if (cpu_state.abrt) { + + ESP = old_esp; + return 1; + } + + if ((tempw & T_FLAG) || ((tempw & I_FLAG) && (cpu_state.eflags & VIP_FLAG))) { + ESP = old_esp; + x86gpf(NULL, 0); + return 1; + } + if (tempw & I_FLAG) + cpu_state.eflags |= VIF_FLAG; + else + cpu_state.eflags &= ~VIF_FLAG; + cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2; + } else { + x86gpf(NULL, 0); + return 1; + } + } else { + tempw = POP_W(); + if (cpu_state.abrt) + return 1; + + if (!(CPL) || !(msw & 1)) + cpu_state.flags = (tempw & 0x7fd5) | 2; + else if (IOPLp) + cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2; + else + cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2; + } + flags_extract(); + rf_flag_no_clear = 1; + + CLOCK_CYCLES(5); + PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0); + +#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC)) + codegen_flags_changed = 
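+        /* EFLAGS was just reloaded explicitly, so there is no lazy flag
+           state left for the dynarec to regenerate: */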
0; +#endif + + return 0; +} +static int +opPOPFD(uint32_t fetchdat) +{ + uint32_t templ; + + if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) { + x86gpf(NULL, 0); + return 1; + } + + templ = POP_L(); + if (cpu_state.abrt) + return 1; + + if (!(CPL) || !(msw & 1)) + cpu_state.flags = (templ & 0x7fd5) | 2; + else if (IOPLp) + cpu_state.flags = (cpu_state.flags & 0x3000) | (templ & 0x4fd5) | 2; + else + cpu_state.flags = (cpu_state.flags & 0x3200) | (templ & 0x4dd5) | 2; + + templ &= (is486 || isibm486) ? 0x3c0000 : 0; + templ |= ((cpu_state.eflags & 3) << 16); + if (cpu_CR4_mask & CR4_VME) + cpu_state.eflags = (templ >> 16) & 0x3f; + else if (CPUID) + cpu_state.eflags = (templ >> 16) & 0x27; + else if (is486 || isibm486) + cpu_state.eflags = (templ >> 16) & 7; + else + cpu_state.eflags = (templ >> 16) & 3; + + flags_extract(); + rf_flag_no_clear = 1; + + CLOCK_CYCLES(5); + PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 0); + +#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC)) + codegen_flags_changed = 0; +#endif + + return 0; +} diff --git a/src/cpu/x86_ops_mov_ctrl_2386.h b/src/cpu/x86_ops_mov_ctrl_2386.h new file mode 100644 index 000000000..c57dc8226 --- /dev/null +++ b/src/cpu/x86_ops_mov_ctrl_2386.h @@ -0,0 +1,414 @@ +static int +opMOV_r_CRx_a16(uint32_t fetchdat) +{ + if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_16(fetchdat); + switch (cpu_reg) { + case 0: + cpu_state.regs[cpu_rm].l = cr0; + if (is486 || isibm486) + cpu_state.regs[cpu_rm].l |= 0x10; /*ET hardwired on 486*/ + else { + if (is386) + cpu_state.regs[cpu_rm].l |= 0x7fffffe0; + else + cpu_state.regs[cpu_rm].l |= 0x7ffffff0; + } + break; + case 2: + cpu_state.regs[cpu_rm].l = cr2; + break; + case 3: + cpu_state.regs[cpu_rm].l = cr3; + break; + case 4: + if (cpu_has_feature(CPU_FEATURE_CR4)) { + cpu_state.regs[cpu_rm].l = cr4; + break; + } + default: + cpu_state.pc = cpu_state.oldpc; + x86illegal(); + break; + } + CLOCK_CYCLES(6); + PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0); + return 0; +} +static int +opMOV_r_CRx_a32(uint32_t fetchdat) +{ + if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_32(fetchdat); + switch (cpu_reg) { + case 0: + cpu_state.regs[cpu_rm].l = cr0; + if (is486 || isibm486) + cpu_state.regs[cpu_rm].l |= 0x10; /*ET hardwired on 486*/ + else { + if (is386) + cpu_state.regs[cpu_rm].l |= 0x7fffffe0; + else + cpu_state.regs[cpu_rm].l |= 0x7ffffff0; + } + break; + case 2: + cpu_state.regs[cpu_rm].l = cr2; + break; + case 3: + cpu_state.regs[cpu_rm].l = cr3; + break; + case 4: + if (cpu_has_feature(CPU_FEATURE_CR4)) { + cpu_state.regs[cpu_rm].l = cr4; + break; + } + default: + cpu_state.pc = cpu_state.oldpc; + x86illegal(); + break; + } + CLOCK_CYCLES(6); + PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1); + return 0; +} + +static int +opMOV_r_DRx_a16(uint32_t fetchdat) +{ + if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_16(fetchdat); + if (cpu_reg == 4 || cpu_reg == 5) { + if (cr4 & 0x8) + x86illegal(); + else + cpu_reg += 2; + } + cpu_state.regs[cpu_rm].l = dr[cpu_reg] | (cpu_reg == 6 ? 
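+        /* DR6 reserved bits (31:16 and 11:4) always read back as ones: */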
0xffff0ff0u : 0); + CLOCK_CYCLES(6); + PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0); + return 0; +} +static int +opMOV_r_DRx_a32(uint32_t fetchdat) +{ + if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_32(fetchdat); + if (cpu_reg == 4 || cpu_reg == 5) { + if (cr4 & 0x8) + x86illegal(); + else + cpu_reg += 2; + } + cpu_state.regs[cpu_rm].l = dr[cpu_reg] | (cpu_reg == 6 ? 0xffff0ff0u : 0); + CLOCK_CYCLES(6); + PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1); + return 0; +} + +static int +opMOV_CRx_r_a16(uint32_t fetchdat) +{ + uint32_t old_cr0 = cr0; + + if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_16(fetchdat); + switch (cpu_reg) { + case 0: + if ((cpu_state.regs[cpu_rm].l ^ cr0) & 0x80000001) + flushmmucache(); + /* Make sure CPL = 0 when switching from real mode to protected mode. */ + if ((cpu_state.regs[cpu_rm].l & 0x01) && !(cr0 & 0x01)) + cpu_state.seg_cs.access &= 0x9f; + cr0 = cpu_state.regs[cpu_rm].l; + if (cpu_16bitbus) + cr0 |= 0x10; + if (!(cr0 & 0x80000000)) + mmu_perm = 4; + if (hascache && !(cr0 & (1 << 30))) + cpu_cache_int_enabled = 1; + else + cpu_cache_int_enabled = 0; + if (hascache && ((cr0 ^ old_cr0) & (1 << 30))) + cpu_update_waitstates(); + if (cr0 & 1) + cpu_cur_status |= CPU_STATUS_PMODE; + else + cpu_cur_status &= ~CPU_STATUS_PMODE; + break; + case 2: + cr2 = cpu_state.regs[cpu_rm].l; + break; + case 3: + cr3 = cpu_state.regs[cpu_rm].l; + flushmmucache(); + break; + case 4: + if (cpu_has_feature(CPU_FEATURE_CR4)) { + if (((cpu_state.regs[cpu_rm].l ^ cr4) & cpu_CR4_mask) & (CR4_PAE | CR4_PGE)) + flushmmucache(); + cr4 = cpu_state.regs[cpu_rm].l & cpu_CR4_mask; + break; + } + + default: + cpu_state.pc = cpu_state.oldpc; + x86illegal(); + break; + } + CLOCK_CYCLES(10); + PREFETCH_RUN(10, 2, rmdat, 0, 0, 0, 0, 0); + return 0; +} +static int +opMOV_CRx_r_a32(uint32_t fetchdat) +{ + uint32_t old_cr0 = cr0; + + if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_32(fetchdat); + switch (cpu_reg) { + case 0: + if ((cpu_state.regs[cpu_rm].l ^ cr0) & 0x80000001) + flushmmucache(); + /* Make sure CPL = 0 when switching from real mode to protected mode. 
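+       CPL is taken from the DPL field (bits 6:5) of the cached CS access
+       byte, so masking those bits to zero forces CPL 0.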
*/
+        if ((cpu_state.regs[cpu_rm].l & 0x01) && !(cr0 & 0x01))
+            cpu_state.seg_cs.access &= 0x9f;
+        cr0 = cpu_state.regs[cpu_rm].l;
+        if (cpu_16bitbus)
+            cr0 |= 0x10;
+        if (!(cr0 & 0x80000000))
+            mmu_perm = 4;
+        if (hascache && !(cr0 & (1 << 30)))
+            cpu_cache_int_enabled = 1;
+        else
+            cpu_cache_int_enabled = 0;
+        if (hascache && ((cr0 ^ old_cr0) & (1 << 30)))
+            cpu_update_waitstates();
+        if (cr0 & 1)
+            cpu_cur_status |= CPU_STATUS_PMODE;
+        else
+            cpu_cur_status &= ~CPU_STATUS_PMODE;
+        break;
+    case 2:
+        cr2 = cpu_state.regs[cpu_rm].l;
+        break;
+    case 3:
+        cr3 = cpu_state.regs[cpu_rm].l;
+        flushmmucache();
+        break;
+    case 4:
+        if (cpu_has_feature(CPU_FEATURE_CR4)) {
+            if (((cpu_state.regs[cpu_rm].l ^ cr4) & cpu_CR4_mask) & (CR4_PAE | CR4_PGE))
+                flushmmucache();
+            cr4 = cpu_state.regs[cpu_rm].l & cpu_CR4_mask;
+            break;
+        }
+
+    default:
+        cpu_state.pc = cpu_state.oldpc;
+        x86illegal();
+        break;
+    }
+    CLOCK_CYCLES(10);
+    PREFETCH_RUN(10, 2, rmdat, 0, 0, 0, 0, 1);
+    return 0;
+}
+
+static int
+opMOV_DRx_r_a16(uint32_t fetchdat)
+{
+    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
+        x86gpf(NULL, 0);
+        return 1;
+    }
+    /* DR7.GD: any debug-register access raises #DB with DR6.BD set; GD is
+       cleared so the handler itself can access the debug registers. */
+    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
+        dr[6] |= 0x2000;
+        dr[7] &= ~0x2000;
+        x86gen();
+        return 1;
+    }
+    fetch_ea_16(fetchdat);
+    if (cpu_reg == 4 || cpu_reg == 5) {
+        if (cr4 & 0x8)
+            x86illegal();
+        else
+            cpu_reg += 2;
+    }
+    dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
+    CLOCK_CYCLES(6);
+    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
+    CPU_BLOCK_END();
+    return 0;
+}
+static int
+opMOV_DRx_r_a32(uint32_t fetchdat)
+{
+    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
+        x86gpf(NULL, 0);
+        return 1;
+    }
+    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
+        dr[6] |= 0x2000;
+        dr[7] &= ~0x2000;
+        x86gen();
+        return 1;
+    }
+    fetch_ea_32(fetchdat);
+    if (cpu_reg == 4 || cpu_reg == 5) {
+        if (cr4 & 0x8)
+            x86illegal();
+        else
+            cpu_reg += 2;
+    }
+    dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
+    CLOCK_CYCLES(6);
+    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
+    CPU_BLOCK_END();
+    return 0;
+}
+
+static void
+opMOV_r_TRx(void)
+{
+#if 0
+    uint32_t base;
+
+    base = _tr[4] & 0xfffff800;
+#endif
+
+    switch (cpu_reg) {
+        case 3:
+#if 0
+            pclog("[R] %08X cache = %08X\n", base + cache_index, _tr[3]);
+#endif
+            _tr[3] = *(uint32_t *) &(_cache[cache_index]);
+            cache_index = (cache_index + 4) & 0xf;
+            break;
+    }
+    cpu_state.regs[cpu_rm].l = _tr[cpu_reg];
+    CLOCK_CYCLES(6);
+}
+static int
+opMOV_r_TRx_a16(uint32_t fetchdat)
+{
+    if ((cpu_s->cpu_type == CPU_PENTIUM) || ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1))) {
+        x86gpf(NULL, 0);
+        return 1;
+    }
+    fetch_ea_16(fetchdat);
+    opMOV_r_TRx();
+    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
+    return 0;
+}
+static int
+opMOV_r_TRx_a32(uint32_t fetchdat)
+{
+    if ((cpu_s->cpu_type == CPU_PENTIUM) || ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1))) {
+        x86gpf(NULL, 0);
+        return 1;
+    }
+    fetch_ea_32(fetchdat);
+    opMOV_r_TRx();
+    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
+    return 0;
+}
+
+static void
+opMOV_TRx_r(void)
+{
+    uint32_t base;
+    int      i;
+    int      ctl;
+
+    _tr[cpu_reg] = cpu_state.regs[cpu_rm].l;
+    base = _tr[4] & 0xfffff800;
+    ctl  = _tr[5] & 3;
+    switch (cpu_reg) {
+        case 3:
+#if 0
+            pclog("[W] %08X cache = %08X\n", base + cache_index, _tr[3]);
+#endif
+            *(uint32_t *) &(_cache[cache_index]) = _tr[3];
+            cache_index = (cache_index + 4) & 0xf;
+            break;
+        case 4:
+#if 0
+            if (!(cr0 & 1) && !(_tr[5] & (1 << 19)))
+                pclog("TAG = %08X, DEST = %08X\n", base, base + cache_index - 16);
+#endif
+            break;
+        case 5:
+#if 0
+            pclog("[16] EXT = %i (%i), SET = %04X\n", !!(_tr[5] & (1 << 19)), _tr[5] & 0x03, _tr[5] & 0x7f0);
+#endif
+            if (!(_tr[5] & (1 << 
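+            /* TR5 bit 19 ("EXT"): when clear, TR5 bits 1:0 select a cache test operation below: */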
19))) { + switch (ctl) { + case 0: +#if 0 + pclog(" Cache fill or read...\n", base); +#endif + break; + case 1: + base += (_tr[5] & 0x7f0); +#if 0 + pclog(" Writing 16 bytes to %08X...\n", base); +#endif + for (i = 0; i < 16; i += 4) + mem_writel_phys(base + i, *(uint32_t *) &(_cache[i])); + break; + case 2: + base += (_tr[5] & 0x7f0); +#if 0 + pclog(" Reading 16 bytes from %08X...\n", base); +#endif + for (i = 0; i < 16; i += 4) + *(uint32_t *) &(_cache[i]) = mem_readl_phys(base + i); + break; + case 3: +#if 0 + pclog(" Cache invalidate/flush...\n", base); +#endif + break; + } + } + break; + } + CLOCK_CYCLES(6); +} +static int +opMOV_TRx_r_a16(uint32_t fetchdat) +{ + if ((cpu_s->cpu_type == CPU_PENTIUM) || ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1))) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_16(fetchdat); + opMOV_TRx_r(); + PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0); + return 0; +} +static int +opMOV_TRx_r_a32(uint32_t fetchdat) +{ + if ((cpu_s->cpu_type == CPU_PENTIUM) || ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1))) { + x86gpf(NULL, 0); + return 1; + } + fetch_ea_32(fetchdat); + opMOV_TRx_r(); + PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1); + return 0; +} diff --git a/src/cpu/x86_ops_ret_2386.h b/src/cpu/x86_ops_ret_2386.h new file mode 100644 index 000000000..ca85bf2b0 --- /dev/null +++ b/src/cpu/x86_ops_ret_2386.h @@ -0,0 +1,297 @@ +#ifdef USE_NEW_DYNAREC +# define CPU_SET_OXPC +#else +# define CPU_SET_OXPC oxpc = cpu_state.pc; +#endif + +#define RETF_a16(stack_offset) \ + if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) { \ + op_pmoderetf(0, stack_offset); \ + return 1; \ + } \ + CPU_SET_OXPC \ + if (stack32) { \ + cpu_state.pc = readmemw(ss, ESP); \ + op_loadcs(readmemw(ss, ESP + 2)); \ + } else { \ + cpu_state.pc = readmemw(ss, SP); \ + op_loadcs(readmemw(ss, SP + 2)); \ + } \ + if (cpu_state.abrt) \ + return 1; \ + if (stack32) \ + ESP += 4 + stack_offset; \ + else \ + SP += 4 + stack_offset; \ + cycles -= timing_retf_rm; + +#define RETF_a32(stack_offset) \ + if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) { \ + op_pmoderetf(1, stack_offset); \ + return 1; \ + } \ + CPU_SET_OXPC \ + if (stack32) { \ + cpu_state.pc = readmeml(ss, ESP); \ + op_loadcs(readmeml(ss, ESP + 4) & 0xffff); \ + } else { \ + cpu_state.pc = readmeml(ss, SP); \ + op_loadcs(readmeml(ss, SP + 4) & 0xffff); \ + } \ + if (cpu_state.abrt) \ + return 1; \ + if (stack32) \ + ESP += 8 + stack_offset; \ + else \ + SP += 8 + stack_offset; \ + cycles -= timing_retf_rm; + +static int +opRETF_a16(uint32_t fetchdat) +{ + int cycles_old = cycles; + UN_USED(cycles_old); + + CPU_BLOCK_END(); + RETF_a16(0); + + PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0); + PREFETCH_FLUSH(); + return 0; +} +static int +opRETF_a32(uint32_t fetchdat) +{ + int cycles_old = cycles; + UN_USED(cycles_old); + + CPU_BLOCK_END(); + RETF_a32(0); + + PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 2, 0, 0, 1); + PREFETCH_FLUSH(); + return 0; +} + +static int +opRETF_a16_imm(uint32_t fetchdat) +{ + uint16_t offset = getwordf(); + int cycles_old = cycles; + UN_USED(cycles_old); + + CPU_BLOCK_END(); + RETF_a16(offset); + + PREFETCH_RUN(cycles_old - cycles, 3, -1, 2, 0, 0, 0, 0); + PREFETCH_FLUSH(); + return 0; +} +static int +opRETF_a32_imm(uint32_t fetchdat) +{ + uint16_t offset = getwordf(); + int cycles_old = cycles; + UN_USED(cycles_old); + + CPU_BLOCK_END(); + RETF_a32(offset); + + PREFETCH_RUN(cycles_old - cycles, 3, -1, 0, 2, 0, 0, 1); + PREFETCH_FLUSH(); + return 0; +} + +static int +opIRET_186(uint32_t fetchdat) +{ + int 
cycles_old = cycles; + UN_USED(cycles_old); + + if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) { + x86gpf(NULL, 0); + return 1; + } + if (msw & 1) { + optype = IRET; + op_pmodeiret(0); + optype = 0; + } else { + uint16_t new_cs; + CPU_SET_OXPC + if (stack32) { + cpu_state.pc = readmemw(ss, ESP); + new_cs = readmemw(ss, ESP + 2); + cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ESP + 4) & 0xffd5) | 2; + ESP += 6; + } else { + cpu_state.pc = readmemw(ss, SP); + new_cs = readmemw(ss, ((SP + 2) & 0xffff)); + cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ((SP + 4) & 0xffff)) & 0x0fd5) | 2; + SP += 6; + } + op_loadcs(new_cs); + cycles -= timing_iret_rm; + } + flags_extract(); + nmi_enable = 1; + rf_flag_no_clear = 1; + CPU_BLOCK_END(); + + PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0); + PREFETCH_FLUSH(); + return cpu_state.abrt; +} + +static int +opIRET_286(uint32_t fetchdat) +{ + int cycles_old = cycles; + UN_USED(cycles_old); + + if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) { + x86gpf(NULL, 0); + return 1; + } + if (msw & 1) { + optype = IRET; + op_pmodeiret(0); + optype = 0; + } else { + uint16_t new_cs; + CPU_SET_OXPC + if (stack32) { + cpu_state.pc = readmemw(ss, ESP); + new_cs = readmemw(ss, ESP + 2); + cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ESP + 4) & 0xffd5) | 2; + ESP += 6; + } else { + cpu_state.pc = readmemw(ss, SP); + new_cs = readmemw(ss, ((SP + 2) & 0xffff)); + cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ((SP + 4) & 0xffff)) & 0x0fd5) | 2; + SP += 6; + } + op_loadcs(new_cs); + cycles -= timing_iret_rm; + } + flags_extract(); + nmi_enable = 1; + rf_flag_no_clear = 1; + CPU_BLOCK_END(); + + PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0); + PREFETCH_FLUSH(); + return cpu_state.abrt; +} + +static int +opIRET(uint32_t fetchdat) +{ + int cycles_old = cycles; + UN_USED(cycles_old); + + if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) { + if (cr4 & CR4_VME) { + uint16_t new_pc; + uint16_t new_cs; + uint16_t new_flags; + + new_pc = readmemw(ss, SP); + new_cs = readmemw(ss, ((SP + 2) & 0xffff)); + new_flags = readmemw(ss, ((SP + 4) & 0xffff)); + if (cpu_state.abrt) + return 1; + + if ((new_flags & T_FLAG) || ((new_flags & I_FLAG) && (cpu_state.eflags & VIP_FLAG))) { + x86gpf(NULL, 0); + return 1; + } + SP += 6; + if (new_flags & I_FLAG) + cpu_state.eflags |= VIF_FLAG; + else + cpu_state.eflags &= ~VIF_FLAG; + cpu_state.flags = (cpu_state.flags & 0x3300) | (new_flags & 0x4cd5) | 2; + op_loadcs(new_cs); + cpu_state.pc = new_pc; + + cycles -= timing_iret_rm; + } else { + x86gpf_expected(NULL, 0); + return 1; + } + } else { + if (msw & 1) { + optype = IRET; + op_pmodeiret(0); + optype = 0; + } else { + uint16_t new_cs; + CPU_SET_OXPC + if (stack32) { + cpu_state.pc = readmemw(ss, ESP); + new_cs = readmemw(ss, ESP + 2); + cpu_state.flags = (readmemw(ss, ESP + 4) & 0xffd5) | 2; + ESP += 6; + } else { + cpu_state.pc = readmemw(ss, SP); + new_cs = readmemw(ss, ((SP + 2) & 0xffff)); + cpu_state.flags = (readmemw(ss, ((SP + 4) & 0xffff)) & 0xffd5) | 2; + SP += 6; + } + op_loadcs(new_cs); + cycles -= timing_iret_rm; + } + } + flags_extract(); + nmi_enable = 1; + rf_flag_no_clear = 1; + CPU_BLOCK_END(); + + PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0); + PREFETCH_FLUSH(); + return cpu_state.abrt; +} + +static int +opIRETD(uint32_t fetchdat) +{ + int cycles_old = cycles; + UN_USED(cycles_old); + + if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) { + 
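+        /* IRETD from V86 mode with IOPL < 3 always faults to the V86 monitor
+           (the VME extensions only assist the 16-bit IRET). */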
x86gpf_expected(NULL, 0); + return 1; + } + if (msw & 1) { + optype = IRET; + op_pmodeiret(1); + optype = 0; + } else { + uint16_t new_cs; + CPU_SET_OXPC + if (stack32) { + cpu_state.pc = readmeml(ss, ESP); + new_cs = readmemw(ss, ESP + 4); + cpu_state.flags = (readmemw(ss, ESP + 8) & 0xffd5) | 2; + cpu_state.eflags = readmemw(ss, ESP + 10); + ESP += 12; + } else { + cpu_state.pc = readmeml(ss, SP); + new_cs = readmemw(ss, ((SP + 4) & 0xffff)); + cpu_state.flags = (readmemw(ss, (SP + 8) & 0xffff) & 0xffd5) | 2; + cpu_state.eflags = readmemw(ss, (SP + 10) & 0xffff); + SP += 12; + } + op_loadcs(new_cs); + cycles -= timing_iret_rm; + } + flags_extract(); + nmi_enable = 1; + rf_flag_no_clear = 1; + CPU_BLOCK_END(); + + PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 2, 0, 0, 1); + PREFETCH_FLUSH(); + return cpu_state.abrt; +} diff --git a/src/cpu/x86seg.c b/src/cpu/x86seg.c index feaad5913..245f3fa65 100644 --- a/src/cpu/x86seg.c +++ b/src/cpu/x86seg.c @@ -2286,8 +2286,10 @@ taskswitch286(uint16_t seg, uint16_t *segdat, int is32) op_loadseg(new_fs, &cpu_state.seg_fs); op_loadseg(new_gs, &cpu_state.seg_gs); + rf_flag_no_clear = 1; + if (t_bit) { - trap = 2; + trap |= 2; #ifdef USE_DYNAREC cpu_block_end = 1; #endif @@ -2467,6 +2469,7 @@ taskswitch286(uint16_t seg, uint16_t *segdat, int is32) tr.limit = limit; tr.access = segdat[2] >> 8; tr.ar_high = segdat[3] & 0xff; + dr[7] &= 0xFFFFFFAA; } void diff --git a/src/cpu/x86seg_common.c b/src/cpu/x86seg_common.c index 8926af0d7..12b698b1a 100644 --- a/src/cpu/x86seg_common.c +++ b/src/cpu/x86seg_common.c @@ -87,6 +87,12 @@ x86de(UNUSED(char *s), UNUSED(uint16_t error)) #endif } +void +x86gen(void) +{ + x86_int(1); +} + void x86gpf(UNUSED(char *s), uint16_t error) { diff --git a/src/cpu/x86seg_common.h b/src/cpu/x86seg_common.h index f4bffed40..9f9049322 100644 --- a/src/cpu/x86seg_common.h +++ b/src/cpu/x86seg_common.h @@ -41,6 +41,7 @@ extern int cgate32; extern int intgatesize; extern void x86seg_reset(void); +extern void x86gen(void); extern void x86de(char *s, uint16_t error); extern void x86gpf(char *s, uint16_t error); extern void x86gpf_expected(char *s, uint16_t error); diff --git a/src/include/86box/mem.h b/src/include/86box/mem.h index 69a2b5de8..db95d3f7b 100644 --- a/src/include/86box/mem.h +++ b/src/include/86box/mem.h @@ -439,6 +439,8 @@ extern void mem_reset_page_blocks(void); extern void flushmmucache(void); extern void flushmmucache_nopc(void); +extern void mem_debug_check_addr(uint32_t addr, int write); + extern void mem_a20_init(void); extern void mem_a20_recalc(void); diff --git a/src/io.c b/src/io.c index 0e68049c3..6e4f190e5 100644 --- a/src/io.c +++ b/src/io.c @@ -279,6 +279,60 @@ io_handler_interleaved(int set, uint16_t base, int size, io_handler_common(set, base, size, inb, inw, inl, outb, outw, outl, priv, 2); } +extern int trap; +/* Set trap for I/O address breakpoints. */ +void +io_debug_check_addr(uint16_t addr) +{ + int i = 0; + int set_trap = 0; + + /* Do nothing on 486+. */ + if (is486) + return; + + if (!(dr[7] & 0xFF)) + return; + + if (!(cr4 & 0x8)) + return; /* No I/O debug trap. 
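+                   CR4.DE (bit 3) gates the 10b R/W encoding used for I/O
+                   breakpoints.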
*/
+
+    for (i = 0; i < 4; i++) {
+        uint16_t dr_addr            = dr[i] & 0xFFFF;
+        int      breakpoint_enabled = !!(dr[7] & (0x3 << (2 * i)));
+        int      len_type_pair      = ((dr[7] >> 16) & (0xF << (4 * i))) >> (4 * i);
+        if (!breakpoint_enabled)
+            continue;
+        if ((len_type_pair & 3) != 2)
+            continue;
+
+        switch ((len_type_pair >> 2) & 3)
+        {
+            case 0x00:
+                if (dr_addr == addr) {
+                    set_trap = 1;
+                    dr[6] |= (1 << i);
+                }
+                break;
+            case 0x01:
+                /* 2-byte breakpoint: match either byte of the aligned pair. */
+                if (((dr_addr & ~1) == addr) || (((dr_addr & ~1) + 1) == addr)) {
+                    set_trap = 1;
+                    dr[6] |= (1 << i);
+                }
+                break;
+            case 0x03:
+                dr_addr &= ~3;
+                if (addr >= dr_addr && addr < (dr_addr + 4)) {
+                    set_trap = 1;
+                    dr[6] |= (1 << i);
+                }
+                break;
+        }
+    }
+    if (set_trap)
+        trap |= 4;
+}
+
 uint8_t
 inb(uint16_t port)
 {
@@ -290,6 +344,8 @@ inb(uint16_t port)
     int qfound = 0;
 #endif
 
+    io_debug_check_addr(port);
+
     if ((pci_flags & FLAG_CONFIG_IO_ON) && (port >= pci_base) && (port < (pci_base + pci_size))) {
         ret = pci_read(port, NULL);
         found = 1;
@@ -350,6 +406,8 @@ outb(uint16_t port, uint8_t val)
     int qfound = 0;
 #endif
 
+    io_debug_check_addr(port);
+
     if ((pci_flags & FLAG_CONFIG_IO_ON) && (port >= pci_base) && (port < (pci_base + pci_size))) {
         pci_write(port, val, NULL);
         found = 1;
@@ -402,6 +460,8 @@ inw(uint16_t port)
 #endif
     uint8_t ret8[2];
 
+    io_debug_check_addr(port);
+
     if ((pci_flags & FLAG_CONFIG_IO_ON) && (port >= pci_base) && (port < (pci_base + pci_size))) {
         ret = pci_readw(port, NULL);
         found = 2;
@@ -474,6 +534,8 @@ outw(uint16_t port, uint16_t val)
     int qfound = 0;
 #endif
 
+    io_debug_check_addr(port);
+
     if ((pci_flags & FLAG_CONFIG_IO_ON) && (port >= pci_base) && (port < (pci_base + pci_size))) {
         pci_writew(port, val, NULL);
         found = 2;
@@ -542,6 +604,8 @@ inl(uint16_t port)
     int qfound = 0;
 #endif
 
+    io_debug_check_addr(port);
+
     if ((pci_flags & FLAG_CONFIG_IO_ON) && (port >= pci_base) && (port < (pci_base + pci_size))) {
         ret = pci_readl(port, NULL);
         found = 4;
@@ -646,6 +710,8 @@ outl(uint16_t port, uint32_t val)
 #endif
     int i = 0;
 
+    io_debug_check_addr(port);
+
     if ((pci_flags & FLAG_CONFIG_IO_ON) && (port >= pci_base) && (port < (pci_base + pci_size))) {
         pci_writel(port, val, NULL);
         found = 4;
diff --git a/src/mem/mmu_2386.c b/src/mem/mmu_2386.c
index 21c62b833..9ed26edfa 100644
--- a/src/mem/mmu_2386.c
+++ b/src/mem/mmu_2386.c
@@ -39,6 +39,54 @@
 #include <86box/rom.h>
 #include <86box/gdbstub.h>
 
+/* Set trap for data address breakpoints. 
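+   Called with the linear address of every byte accessed: DR0-DR3 hold linear
+   addresses, DR7 bits 2i/2i+1 enable slot i, and the nibble at bit 16 + 4i
+   holds the slot's R/W type (low 2 bits) and length (high 2 bits).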
*/
+void
+mem_debug_check_addr(uint32_t addr, int write)
+{
+    int i        = 0;
+    int set_trap = 0;
+
+    if (!(dr[7] & 0xFF))
+        return;
+
+    for (i = 0; i < 4; i++) {
+        uint32_t dr_addr            = dr[i];
+        int      breakpoint_enabled = !!(dr[7] & (0x3 << (2 * i)));
+        int      len_type_pair      = ((dr[7] >> 16) & (0xF << (4 * i))) >> (4 * i);
+        if (!breakpoint_enabled)
+            continue;
+        /* Reads match only R/W type 11 (read/write); writes match 01 or 11. */
+        if (!write && ((len_type_pair & 3) != 3))
+            continue;
+        if (write && ((len_type_pair & 3) != 1) && ((len_type_pair & 3) != 3))
+            continue;
+
+        switch ((len_type_pair >> 2) & 3)
+        {
+            case 0x00:
+                if (dr_addr == addr) {
+                    set_trap = 1;
+                    dr[6] |= (1 << i);
+                }
+                break;
+            case 0x01:
+                /* 2-byte breakpoint: match either byte of the aligned pair. */
+                if (((dr_addr & ~1) == addr) || (((dr_addr & ~1) + 1) == addr)) {
+                    set_trap = 1;
+                    dr[6] |= (1 << i);
+                }
+                break;
+            case 0x03:
+                dr_addr &= ~3;
+                if (addr >= dr_addr && addr < (dr_addr + 4)) {
+                    set_trap = 1;
+                    dr[6] |= (1 << i);
+                }
+                break;
+        }
+    }
+    if (set_trap)
+        trap |= 4;
+}
+
 uint8_t
 mem_readb_map(uint32_t addr)
 {
@@ -243,6 +291,7 @@ readmembl_2386(uint32_t addr)
 
     GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_READ, 1);
 
+    mem_debug_check_addr(addr, 0);
     addr64 = (uint64_t) addr;
 
     mem_logical_addr = addr;
@@ -270,6 +319,7 @@ writemembl_2386(uint32_t addr, uint8_t val)
     mem_mapping_t *map;
     uint64_t a;
 
+    mem_debug_check_addr(addr, 1);
     GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_WRITE, 1);
 
     addr64 = (uint64_t) addr;
@@ -347,6 +397,8 @@ readmemwl_2386(uint32_t addr)
 
     addr64a[0] = addr;
     addr64a[1] = addr + 1;
+    mem_debug_check_addr(addr, 0);
+    mem_debug_check_addr(addr + 1, 0);
     GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 2);
 
     mem_logical_addr = addr;
@@ -402,6 +454,8 @@ writememwl_2386(uint32_t addr, uint16_t val)
 
     addr64a[0] = addr;
     addr64a[1] = addr + 1;
+    mem_debug_check_addr(addr, 1);
+    mem_debug_check_addr(addr + 1, 1);
     GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 2);
 
     mem_logical_addr = addr;
@@ -555,8 +609,10 @@ readmemll_2386(uint32_t addr)
     int i;
     uint64_t a = 0x0000000000000000ULL;
 
-    for (i = 0; i < 4; i++)
+    for (i = 0; i < 4; i++) {
         addr64a[i] = (uint64_t) (addr + i);
+        mem_debug_check_addr(addr + i, 0);
+    }
 
     GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 4);
 
     mem_logical_addr = addr;
@@ -626,8 +682,10 @@ writememll_2386(uint32_t addr, uint32_t val)
     int i;
     uint64_t a = 0x0000000000000000ULL;
 
-    for (i = 0; i < 4; i++)
+    for (i = 0; i < 4; i++) {
         addr64a[i] = (uint64_t) (addr + i);
+        mem_debug_check_addr(addr + i, 1);
+    }
 
     GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 4);
 
     mem_logical_addr = addr;
@@ -807,8 +865,10 @@ readmemql_2386(uint32_t addr)
     int i;
     uint64_t a = 0x0000000000000000ULL;
 
-    for (i = 0; i < 8; i++)
+    for (i = 0; i < 8; i++) {
         addr64a[i] = (uint64_t) (addr + i);
+        mem_debug_check_addr(addr + i, 0);
+    }
 
     GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 8);
 
     mem_logical_addr = addr;
@@ -870,8 +930,10 @@ writememql_2386(uint32_t addr, uint64_t val)
     int i;
     uint64_t a = 0x0000000000000000ULL;
 
-    for (i = 0; i < 8; i++)
+    for (i = 0; i < 8; i++) {
         addr64a[i] = (uint64_t) (addr + i);
+        mem_debug_check_addr(addr + i, 1);
+    }
 
     GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 8);
 
     mem_logical_addr = addr;
@@ -957,32 +1019,35 @@ do_mmutranslate_2386(uint32_t addr, uint32_t *a64, int num, int write)
     uint32_t last_addr = addr + (num - 1);
     uint64_t a = 0x0000000000000000ULL;
 
+    mem_debug_check_addr(addr, write);
+
     for (i = 0; i < num; i++)
        a64[i] = (uint64_t) addr;
 
-    for (i = 0; i < num; i++) {
-        if (cr0 >> 31) {
-            /* If we have encountered at least one page fault, mark all subsequent addresses as
-               having page faulted, prevents false negatives in readmem*l_no_mmut. 
*/ - if ((i > 0) && cpu_state.abrt && !high_page) - a64[i] = a64[i - 1]; - /* If we are on the same page, there is no need to translate again, as we can just - reuse the previous result. */ - else if (i == 0) { - a = mmutranslatereal_2386(addr, write); - a64[i] = (uint32_t) a; - } else if (!(addr & 0xfff)) { - a = mmutranslatereal_2386(last_addr, write); - a64[i] = (uint32_t) a; + if (!(cr0 >> 31)) + return; - if (!cpu_state.abrt) { - a = (a & 0xfffffffffffff000ULL) | ((uint64_t) (addr & 0xfff)); - a64[i] = (uint32_t) a; - } - } else { + for (i = 0; i < num; i++) { + /* If we have encountered at least one page fault, mark all subsequent addresses as + having page faulted, prevents false negatives in readmem*l_no_mmut. */ + if ((i > 0) && cpu_state.abrt && !high_page) + a64[i] = a64[i - 1]; + /* If we are on the same page, there is no need to translate again, as we can just + reuse the previous result. */ + else if (i == 0) { + a = mmutranslatereal_2386(addr, write); + a64[i] = (uint32_t) a; + } else if (!(addr & 0xfff)) { + a = mmutranslatereal_2386(last_addr, write); + a64[i] = (uint32_t) a; + + if (!cpu_state.abrt) { a = (a & 0xfffffffffffff000ULL) | ((uint64_t) (addr & 0xfff)); a64[i] = (uint32_t) a; } + } else { + a = (a & 0xfffffffffffff000ULL) | ((uint64_t) (addr & 0xfff)); + a64[i] = (uint32_t) a; } addr++;
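
Taken together, the three new checkers (cpu_386_check_instruction_fault, io_debug_check_addr, mem_debug_check_addr) implement one matching rule over the DR7 enable bits and the per-slot type/length nibbles. The standalone sketch below condenses that rule for reference. It is illustrative only: plain C rather than 86Box code, with bp_matches, ACC_WRITE, etc. invented for this example, and with the 10b I/O type and the CR4.DE/is486 gating from io_debug_check_addr omitted.

#include <stdint.h>
#include <stdio.h>

enum bp_access { ACC_EXEC, ACC_WRITE, ACC_READ };

/* Does debug-register slot i match a 1-byte access at linear address addr?
   DR7 layout per slot i: bits 2i/2i+1 are the local/global enable pair,
   and the nibble at bit 16 + 4*i is R/W type (low 2 bits) + LEN (high 2 bits). */
static int
bp_matches(uint32_t dr7, const uint32_t drn[4], int i, uint32_t addr, enum bp_access acc)
{
    static const uint32_t len_bytes[4] = { 1, 2, 0 /* LEN=10 is undefined on the 386 */, 4 };

    uint32_t nibble = (dr7 >> (16 + 4 * i)) & 0xfu;
    uint32_t type   = nibble & 3;
    uint32_t span   = len_bytes[nibble >> 2];
    uint32_t base;

    if (!(dr7 & (0x3u << (2 * i))) || (span == 0))
        return 0; /* slot disabled, or undefined length */

    if (acc == ACC_EXEC) {
        if (type != 0)
            return 0;
        span = 1; /* execution breakpoints are 1 byte; LEN should be 00 */
    } else if (acc == ACC_WRITE) {
        if ((type != 1) && (type != 3))
            return 0; /* writes match R/W = 01 or 11 */
    } else {
        if (type != 3)
            return 0; /* data reads match only R/W = 11 (read/write) */
    }

    base = drn[i] & ~(span - 1); /* a breakpoint is aligned to its own length */
    return (addr >= base) && (addr < (base + span));
}

int
main(void)
{
    uint32_t drn[4] = { 0x1001, 0, 0, 0 };
    /* Slot 0: L0 set (bit 0); nibble at bit 16 = 0101b -> LEN=01 (2 bytes), R/W=01 (write). */
    uint32_t dr7 = 0x1u | (0x5u << 16);

    printf("%d\n", bp_matches(dr7, drn, 0, 0x1000, ACC_WRITE)); /* 1: inside the aligned pair */
    printf("%d\n", bp_matches(dr7, drn, 0, 0x1001, ACC_WRITE)); /* 1 */
    printf("%d\n", bp_matches(dr7, drn, 0, 0x1001, ACC_READ));  /* 0: write-only breakpoint */
    return 0;
}

Because the mmu_2386.c hooks call mem_debug_check_addr once per accessed byte, the emulator only ever needs the single-address case of this predicate; the LEN decoding above shows what those per-byte comparisons collectively cover.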