286/386 interpreter fixes - the correct opcode arrays are now used and the debug registers have been fixed.
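The fix routes the 286/386 interpreter through its own x86_2386_opcodes tables instead of the shared x86_opcodes ones. A minimal standalone sketch of the table-dispatch pattern involved, using hypothetical demo_* names rather than the 86Box globals:

/* Sketch: 1024-entry opcode dispatch indexed by (opcode | op32) & 0x3ff, where
 * op32 carries 0x100 (32-bit operand size) and 0x200 (32-bit address size). */
#include <stdint.h>
#include <stdio.h>

typedef int (*op_fn)(uint32_t fetchdat);

static int
op_nop(uint32_t fetchdat)
{
    (void) fetchdat;
    return 0;
}

static op_fn demo_opcodes[0x400]; /* 256 opcodes x 4 size combinations */

static int
dispatch(uint8_t opcode, uint32_t op32, uint32_t fetchdat)
{
    return demo_opcodes[(opcode | op32) & 0x3ff](fetchdat);
}

int
main(void)
{
    for (int i = 0; i < 0x400; i++)
        demo_opcodes[i] = op_nop;

    printf("%d\n", dispatch(0x90, 0x100, 0)); /* NOP with a 32-bit operand-size prefix */
    return 0;
}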
@@ -25,6 +25,7 @@
|
||||
#include <86box/fdd.h>
|
||||
#include <86box/fdc.h>
|
||||
#include <86box/machine.h>
|
||||
#include <86box/plat_fallthrough.h>
|
||||
#include <86box/gdbstub.h>
|
||||
#ifndef OPS_286_386
|
||||
# define OPS_286_386
|
||||
@@ -262,11 +263,10 @@ exec386_2386(int32_t cycs)
|
||||
CHECK_READ_CS(MIN(ol, 4));
|
||||
ins_fetch_fault = cpu_386_check_instruction_fault();
|
||||
|
||||
if (!cpu_state.abrt && ins_fetch_fault) {
|
||||
x86gen();
|
||||
/* Breakpoint fault has priority over other faults. */
|
||||
if (ins_fetch_fault) {
|
||||
ins_fetch_fault = 0;
|
||||
/* No instructions executed at this point. */
|
||||
goto block_ended;
|
||||
cpu_state.abrt = 1;
|
||||
}
|
||||
|
||||
if (!cpu_state.abrt) {
|
||||
@@ -279,7 +279,8 @@ exec386_2386(int32_t cycs)
|
||||
trap |= !!(cpu_state.flags & T_FLAG);
|
||||
|
||||
cpu_state.pc++;
|
||||
x86_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
|
||||
cpu_state.eflags &= ~(RF_FLAG);
|
||||
x86_2386_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
|
||||
if (x86_was_reset)
|
||||
break;
|
||||
}
|
||||
@@ -319,8 +320,7 @@ block_ended:
|
||||
#endif
|
||||
}
|
||||
}
|
||||
if (!x86_was_reset && ins_fetch_fault)
|
||||
x86gen(); /* This is supposed to be the first one serviced by the processor according to the manual. */
|
||||
according to the manual. */
|
||||
} else if (trap) {
|
||||
flags_rebuild();
|
||||
if (trap & 2) dr[6] |= 0x8000;
|
||||
|
||||
@@ -1412,7 +1412,7 @@ x86_int(int num)
|
||||
cpu_state.pc = cpu_state.oldpc;
|
||||
|
||||
if (msw & 1)
|
||||
is486 ? pmodeint(num, 0) : pmodeint_2386(num, 0);
|
||||
cpu_use_exec ? pmodeint(num, 0) : pmodeint_2386(num, 0);
|
||||
else {
|
||||
addr = (num << 2) + idt.base;
|
||||
|
||||
@@ -1445,7 +1445,7 @@ x86_int(int num)
|
||||
oxpc = cpu_state.pc;
|
||||
#endif
|
||||
cpu_state.pc = readmemw(0, addr);
|
||||
is486 ? loadcs(readmemw(0, addr + 2)) : loadcs_2386(readmemw(0, addr + 2));
|
||||
cpu_use_exec ? loadcs(readmemw(0, addr + 2)) : loadcs_2386(readmemw(0, addr + 2));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1462,7 +1462,7 @@ x86_int_sw(int num)
|
||||
cycles -= timing_int;
|
||||
|
||||
if (msw & 1)
|
||||
is486 ? pmodeint(num, 1) : pmodeint_2386(num, 1);
|
||||
cpu_use_exec ? pmodeint(num, 1) : pmodeint_2386(num, 1);
|
||||
else {
|
||||
addr = (num << 2) + idt.base;
|
||||
|
||||
@@ -1487,7 +1487,7 @@ x86_int_sw(int num)
|
||||
oxpc = cpu_state.pc;
|
||||
#endif
|
||||
cpu_state.pc = readmemw(0, addr);
|
||||
is486 ? loadcs(readmemw(0, addr + 2)) : loadcs_2386(readmemw(0, addr + 2));
|
||||
cpu_use_exec ? loadcs(readmemw(0, addr + 2)) : loadcs_2386(readmemw(0, addr + 2));
|
||||
cycles -= timing_int_rm;
|
||||
}
|
||||
}
|
||||
@@ -1529,7 +1529,7 @@ x86_int_sw_rm(int num)
|
||||
cpu_state.eflags &= ~VIF_FLAG;
|
||||
cpu_state.flags &= ~T_FLAG;
|
||||
cpu_state.pc = new_pc;
|
||||
is486 ? loadcs(new_cs) : loadcs_2386(new_cs);
|
||||
cpu_use_exec ? loadcs(new_cs) : loadcs_2386(new_cs);
|
||||
#ifndef USE_NEW_DYNAREC
|
||||
oxpc = cpu_state.pc;
|
||||
#endif
|
||||
|
||||
@@ -225,19 +225,37 @@ int checkio(uint32_t port, int mask);
|
||||
static __inline uint8_t
|
||||
fastreadb(uint32_t a)
|
||||
{
|
||||
return readmembl_2386(a);
|
||||
uint8_t ret;
|
||||
read_type = 1;
|
||||
ret = readmembl_2386(a);
|
||||
read_type = 4;
|
||||
if (cpu_state.abrt)
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static __inline uint16_t
|
||||
fastreadw(uint32_t a)
|
||||
{
|
||||
return readmemwl_2386(a);
|
||||
uint16_t ret;
|
||||
read_type = 1;
|
||||
ret = readmemwl_2386(a);
|
||||
read_type = 4;
|
||||
if (cpu_state.abrt)
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static __inline uint32_t
|
||||
fastreadl(uint32_t a)
|
||||
{
|
||||
return readmemll_2386(a);
|
||||
uint32_t ret;
|
||||
read_type = 1;
|
||||
ret = readmemll_2386(a);
|
||||
read_type = 4;
|
||||
if (cpu_state.abrt)
|
||||
return 0;
|
||||
return ret;
|
||||
}
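The wrappers above temporarily tag the access as an instruction fetch (read_type = 1) and restore the data-read default (4) afterwards, so the debug-register checks can tell execute breakpoints from read breakpoints. A minimal sketch of that pattern, with hypothetical demo_* stand-ins for the 86Box globals and memory routines:

#include <stdint.h>
#include <stdio.h>

static int demo_read_type = 4;     /* stand-in for the global read_type: 1 = fetch, 4 = data read */

static uint8_t
demo_physical_readb(uint32_t addr) /* stand-in for readmembl_2386() */
{
    (void) addr;
    return 0x90;
}

static uint8_t
demo_fetchb(uint32_t addr)
{
    uint8_t ret;

    demo_read_type = 1;            /* this access is a code fetch */
    ret = demo_physical_readb(addr);
    demo_read_type = 4;            /* restore the data-read default */
    return ret;
}

int
main(void)
{
    printf("%02x (read_type now %d)\n", demo_fetchb(0x1000), demo_read_type);
    return 0;
}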
|
||||
#else
|
||||
static __inline uint8_t
|
||||
@@ -342,31 +360,41 @@ extern int opcode_length[256];
|
||||
static __inline uint16_t
|
||||
fastreadw_fetch(uint32_t a)
|
||||
{
|
||||
uint16_t val;
|
||||
uint16_t ret;
|
||||
|
||||
if ((a & 0xFFF) > 0xFFE) {
|
||||
val = fastreadb(a);
|
||||
if (opcode_length[val & 0xff] > 1)
|
||||
val |= ((uint16_t) fastreadb(a + 1) << 8);
|
||||
return val;
|
||||
ret = fastreadb(a);
|
||||
if (!cpu_state.abrt && (opcode_length[ret & 0xff] > 1))
|
||||
ret |= ((uint16_t) fastreadb(a + 1) << 8);
|
||||
} else if (cpu_state.abrt)
|
||||
ret = 0;
|
||||
else {
|
||||
read_type = 1;
|
||||
ret = readmemwl_2386(a);
|
||||
read_type = 4;
|
||||
}
|
||||
|
||||
return readmemwl_2386(a);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static __inline uint32_t
|
||||
fastreadl_fetch(uint32_t a)
|
||||
{
|
||||
uint32_t val;
|
||||
uint32_t ret;
|
||||
|
||||
if (cpu_16bitbus || ((a & 0xFFF) > 0xFFC)) {
|
||||
val = fastreadw_fetch(a);
|
||||
if (opcode_length[val & 0xff] > 2)
|
||||
val |= ((uint32_t) fastreadw(a + 2) << 16);
|
||||
return val;
|
||||
ret = fastreadw_fetch(a);
|
||||
if (!cpu_state.abrt && (opcode_length[ret & 0xff] > 2))
|
||||
ret |= ((uint32_t) fastreadw(a + 2) << 16);
|
||||
} else if (cpu_state.abrt)
|
||||
ret = 0;
|
||||
else {
|
||||
read_type = 1;
|
||||
ret = readmemll_2386(a);
|
||||
read_type = 4;
|
||||
}
|
||||
|
||||
return readmemll_2386(a);
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
static __inline uint16_t
|
||||
|
||||
@@ -186,7 +186,11 @@ extern void x386_dynarec_log(const char *fmt, ...);
|
||||
#else
|
||||
# include "x86_ops_flag.h"
|
||||
#endif
|
||||
#include "x86_ops_fpu.h"
|
||||
#ifdef OPS_286_386
|
||||
# include "x86_ops_fpu_2386.h"
|
||||
#else
|
||||
# include "x86_ops_fpu.h"
|
||||
#endif
|
||||
#include "x86_ops_inc_dec.h"
|
||||
#include "x86_ops_int.h"
|
||||
#include "x86_ops_io.h"
|
||||
@@ -216,7 +220,11 @@ extern void x386_dynarec_log(const char *fmt, ...);
|
||||
#endif
|
||||
#include "x86_ops_mul.h"
|
||||
#include "x86_ops_pmode.h"
|
||||
#include "x86_ops_prefix.h"
|
||||
#ifdef OPS_286_386
|
||||
# include "x86_ops_prefix_2386.h"
|
||||
#else
|
||||
# include "x86_ops_prefix.h"
|
||||
#endif
|
||||
#ifdef IS_DYNAREC
|
||||
# include "x86_ops_rep_dyn.h"
|
||||
#else
|
||||
|
||||
@@ -174,6 +174,7 @@ typedef struct {
|
||||
#define VIP_FLAG 0x0010 /* in EFLAGS */
|
||||
#define VID_FLAG 0x0020 /* in EFLAGS */
|
||||
|
||||
#define EM_FLAG 0x00004 /* in CR0 */
|
||||
#define WP_FLAG 0x10000 /* in CR0 */
|
||||
|
||||
#define CR4_VME (1 << 0) /* Virtual 8086 Mode Extensions */
|
||||
|
||||
@@ -275,6 +275,12 @@ reset_common(int hard)
|
||||
cr4 = 0;
|
||||
cpu_state.eflags = 0;
|
||||
cgate32 = 0;
|
||||
if (is386 && !is486) {
|
||||
for (uint8_t i = 0; i < 4; i++)
|
||||
dr[i] = 0x00000000;
|
||||
dr[6] = 0xffff1ff0;
|
||||
dr[7] = 0x00000400;
|
||||
}
|
||||
if (is286) {
|
||||
if (is486)
|
||||
loadcs(0xF000);
|
||||
|
||||
src/cpu/x86_ops_fpu_2386.h (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
/* Copyright holders: Sarah Walker
|
||||
see COPYING for more details
|
||||
*/
|
||||
static int
|
||||
opESCAPE_d8_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_d8_a16[(fetchdat >> 3) & 0x1f](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_d8_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_d8_a32[(fetchdat >> 3) & 0x1f](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_d9_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_d9_a16[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_d9_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_d9_a32[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_da_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_da_a16[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_da_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_da_a32[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_db_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_db_a16[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_db_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_db_a32[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_dc_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_dc_a16[(fetchdat >> 3) & 0x1f](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_dc_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_dc_a32[(fetchdat >> 3) & 0x1f](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_dd_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_dd_a16[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_dd_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_dd_a32[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_de_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_de_a16[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_de_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_de_a32[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opESCAPE_df_a16(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_df_a16[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
static int
|
||||
opESCAPE_df_a32(uint32_t fetchdat)
|
||||
{
|
||||
return x86_2386_opcodes_df_a32[fetchdat & 0xff](fetchdat);
|
||||
}
|
||||
|
||||
static int
|
||||
opWAIT(uint32_t fetchdat)
|
||||
{
|
||||
if ((cr0 & 0xa) == 0xa) {
|
||||
x86_int(7);
|
||||
return 1;
|
||||
}
|
||||
|
||||
#if 0
|
||||
if (!cpu_use_dynarec && fpu_softfloat) {
|
||||
#endif
|
||||
if (fpu_softfloat) {
|
||||
if (fpu_state.swd & FPU_SW_Summary) {
|
||||
if (cr0 & 0x20) {
|
||||
x86_int(16);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
CLOCK_CYCLES(4);
|
||||
return 0;
|
||||
}
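opWAIT above faults with #NM (vector 7) when CR0.MP and CR0.TS are both set ((cr0 & 0xa) == 0xa), and, with an unmasked FPU exception pending, raises #MF (vector 16) if CR0.NE (0x20) is set. A small sketch of those bit tests, using hypothetical names rather than the emulator's state:

#include <stdint.h>
#include <stdio.h>

#define CR0_MP (1u << 1)   /* monitor coprocessor */
#define CR0_TS (1u << 3)   /* task switched */
#define CR0_NE (1u << 5)   /* native FPU error reporting */

/* Returns the exception vector WAIT would raise, or -1 if it completes. */
static int
wait_fault(uint32_t cr0, int fpu_error_pending)
{
    if ((cr0 & (CR0_MP | CR0_TS)) == (CR0_MP | CR0_TS))
        return 7;                                   /* #NM, as in x86_int(7) above */
    if (fpu_error_pending && (cr0 & CR0_NE))
        return 16;                                  /* #MF, as in x86_int(16) above */
    return -1;
}

int
main(void)
{
    printf("%d\n", wait_fault(CR0_MP | CR0_TS, 0)); /* prints 7 */
    printf("%d\n", wait_fault(CR0_NE, 1));          /* prints 16 */
    return 0;
}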
|
||||
@@ -726,6 +726,22 @@ opHLT(uint32_t fetchdat)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef OPS_286_386
|
||||
static int
|
||||
opLOCK(uint32_t fetchdat)
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 0;
|
||||
cpu_state.pc++;
|
||||
|
||||
ILLEGAL_ON((fetchdat & 0xff) == 0x90);
|
||||
|
||||
CLOCK_CYCLES(4);
|
||||
PREFETCH_PREFIX();
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
#else
|
||||
static int
|
||||
opLOCK(uint32_t fetchdat)
|
||||
{
|
||||
@@ -740,6 +756,7 @@ opLOCK(uint32_t fetchdat)
|
||||
PREFETCH_PREFIX();
|
||||
return x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
opBOUND_w_a16(uint32_t fetchdat)
|
||||
|
||||
@@ -82,18 +82,41 @@ opMOV_r_CRx_a32(uint32_t fetchdat)
|
||||
static int
|
||||
opMOV_r_DRx_a16(uint32_t fetchdat)
|
||||
{
|
||||
if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
|
||||
if ((CPL > 0) && (cr0 & 1)) {
|
||||
x86gpf(NULL, 0);
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_16(fetchdat);
|
||||
if (cpu_reg == 4 || cpu_reg == 5) {
|
||||
if (cr4 & 0x8)
|
||||
x86illegal();
|
||||
else
|
||||
cpu_reg += 2;
|
||||
if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
|
||||
trap |= 1;
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_16(fetchdat);
|
||||
switch (cpu_reg) {
|
||||
case 0 ... 3:
|
||||
cpu_state.regs[cpu_rm].l = dr[cpu_reg];
|
||||
break;
|
||||
case 4:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 6:
|
||||
cpu_state.regs[cpu_rm].l = dr[6];
|
||||
break;
|
||||
case 5:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 7:
|
||||
cpu_state.regs[cpu_rm].l = dr[7];
|
||||
break;
|
||||
default:
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
cpu_state.regs[cpu_rm].l = dr[cpu_reg] | (cpu_reg == 6 ? 0xffff0ff0u : 0);
|
||||
CLOCK_CYCLES(6);
|
||||
PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
|
||||
return 0;
|
||||
@@ -101,18 +124,41 @@ opMOV_r_DRx_a16(uint32_t fetchdat)
|
||||
static int
|
||||
opMOV_r_DRx_a32(uint32_t fetchdat)
|
||||
{
|
||||
if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
|
||||
if ((CPL > 0) && (cr0 & 1)) {
|
||||
x86gpf(NULL, 0);
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_32(fetchdat);
|
||||
if (cpu_reg == 4 || cpu_reg == 5) {
|
||||
if (cr4 & 0x8)
|
||||
x86illegal();
|
||||
else
|
||||
cpu_reg += 2;
|
||||
if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
|
||||
trap |= 1;
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_32(fetchdat);
|
||||
switch (cpu_reg) {
|
||||
case 0 ... 3:
|
||||
cpu_state.regs[cpu_rm].l = dr[cpu_reg];
|
||||
break;
|
||||
case 4:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 6:
|
||||
cpu_state.regs[cpu_rm].l = dr[6];
|
||||
break;
|
||||
case 5:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 7:
|
||||
cpu_state.regs[cpu_rm].l = dr[7];
|
||||
break;
|
||||
default:
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
cpu_state.regs[cpu_rm].l = dr[cpu_reg] | (cpu_reg == 6 ? 0xffff0ff0u : 0);
|
||||
CLOCK_CYCLES(6);
|
||||
PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
|
||||
return 0;
|
||||
@@ -236,24 +282,41 @@ opMOV_CRx_r_a32(uint32_t fetchdat)
|
||||
static int
|
||||
opMOV_DRx_r_a16(uint32_t fetchdat)
|
||||
{
|
||||
if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
|
||||
if ((CPL > 0) && (cr0 & 1)) {
|
||||
x86gpf(NULL, 0);
|
||||
return 1;
|
||||
}
|
||||
if ((dr[6] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
|
||||
dr[7] |= 0x2000;
|
||||
dr[6] &= ~0x2000;
|
||||
x86gen();
|
||||
if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
|
||||
trap |= 1;
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_16(fetchdat);
|
||||
if (cpu_reg == 4 || cpu_reg == 5) {
|
||||
if (cr4 & 0x8)
|
||||
switch (cpu_reg) {
|
||||
case 0 ... 3:
|
||||
dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
|
||||
break;
|
||||
case 4:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 6:
|
||||
dr[6] = (dr[6] & 0xffff0ff0) | (cpu_state.regs[cpu_rm].l & 0x0000f00f);
|
||||
break;
|
||||
case 5:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 7:
|
||||
dr[7] = cpu_state.regs[cpu_rm].l | 0x00000400;
|
||||
break;
|
||||
default:
|
||||
x86illegal();
|
||||
else
|
||||
cpu_reg += 2;
|
||||
return 1;
|
||||
}
|
||||
dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
|
||||
CLOCK_CYCLES(6);
|
||||
PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
|
||||
CPU_BLOCK_END();
|
||||
@@ -262,18 +325,41 @@ opMOV_DRx_r_a16(uint32_t fetchdat)
|
||||
static int
|
||||
opMOV_DRx_r_a32(uint32_t fetchdat)
|
||||
{
|
||||
if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
|
||||
if ((CPL > 0) && (cr0 & 1)) {
|
||||
x86gpf(NULL, 0);
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_16(fetchdat);
|
||||
if (cpu_reg == 4 || cpu_reg == 5) {
|
||||
if (cr4 & 0x8)
|
||||
x86illegal();
|
||||
else
|
||||
cpu_reg += 2;
|
||||
if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
|
||||
trap |= 1;
|
||||
return 1;
|
||||
}
|
||||
fetch_ea_32(fetchdat);
|
||||
switch (cpu_reg) {
|
||||
case 0 ... 3:
|
||||
dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
|
||||
break;
|
||||
case 4:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 6:
|
||||
dr[6] = (dr[6] & 0xffff0ff0) | (cpu_state.regs[cpu_rm].l & 0x0000f00f);
|
||||
break;
|
||||
case 5:
|
||||
if (cr4 & 0x8) {
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
fallthrough;
|
||||
case 7:
|
||||
dr[7] = cpu_state.regs[cpu_rm].l | 0x00000400;
|
||||
break;
|
||||
default:
|
||||
x86illegal();
|
||||
return 1;
|
||||
}
|
||||
dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
|
||||
CLOCK_CYCLES(6);
|
||||
PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
|
||||
CPU_BLOCK_END();
|
||||
|
||||
@@ -195,7 +195,11 @@ opMOV_seg_w_a16(uint32_t fetchdat)
|
||||
cpu_state.pc++;
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
#ifdef OPS_286_386
|
||||
x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#else
|
||||
x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#endif
|
||||
return 1;
|
||||
case 0x20: /*FS*/
|
||||
op_loadseg(new_seg, &cpu_state.seg_fs);
|
||||
@@ -240,7 +244,11 @@ opMOV_seg_w_a32(uint32_t fetchdat)
|
||||
cpu_state.pc++;
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
#ifdef OPS_286_386
|
||||
x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#else
|
||||
x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#endif
|
||||
return 1;
|
||||
case 0x20: /*FS*/
|
||||
op_loadseg(new_seg, &cpu_state.seg_fs);
|
||||
|
||||
src/cpu/x86_ops_prefix_2386.h (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
#define op_seg(name, seg, opcode_table, normal_opcode_table) \
|
||||
static int op##name##_w_a16(uint32_t fetchdat) \
|
||||
{ \
|
||||
fetchdat = fastreadl(cs + cpu_state.pc); \
|
||||
if (cpu_state.abrt) \
|
||||
return 1; \
|
||||
cpu_state.pc++; \
|
||||
\
|
||||
cpu_state.ea_seg = &seg; \
|
||||
cpu_state.ssegs = 1; \
|
||||
CLOCK_CYCLES(4); \
|
||||
PREFETCH_PREFIX(); \
|
||||
\
|
||||
if (opcode_table[fetchdat & 0xff]) \
|
||||
return opcode_table[fetchdat & 0xff](fetchdat >> 8); \
|
||||
return normal_opcode_table[fetchdat & 0xff](fetchdat >> 8); \
|
||||
} \
|
||||
\
|
||||
static int op##name##_l_a16(uint32_t fetchdat) \
|
||||
{ \
|
||||
fetchdat = fastreadl(cs + cpu_state.pc); \
|
||||
if (cpu_state.abrt) \
|
||||
return 1; \
|
||||
cpu_state.pc++; \
|
||||
\
|
||||
cpu_state.ea_seg = &seg; \
|
||||
cpu_state.ssegs = 1; \
|
||||
CLOCK_CYCLES(4); \
|
||||
PREFETCH_PREFIX(); \
|
||||
\
|
||||
if (opcode_table[(fetchdat & 0xff) | 0x100]) \
|
||||
return opcode_table[(fetchdat & 0xff) | 0x100](fetchdat >> 8); \
|
||||
return normal_opcode_table[(fetchdat & 0xff) | 0x100](fetchdat >> 8); \
|
||||
} \
|
||||
\
|
||||
static int op##name##_w_a32(uint32_t fetchdat) \
|
||||
{ \
|
||||
fetchdat = fastreadl(cs + cpu_state.pc); \
|
||||
if (cpu_state.abrt) \
|
||||
return 1; \
|
||||
cpu_state.pc++; \
|
||||
\
|
||||
cpu_state.ea_seg = &seg; \
|
||||
cpu_state.ssegs = 1; \
|
||||
CLOCK_CYCLES(4); \
|
||||
PREFETCH_PREFIX(); \
|
||||
\
|
||||
if (opcode_table[(fetchdat & 0xff) | 0x200]) \
|
||||
return opcode_table[(fetchdat & 0xff) | 0x200](fetchdat >> 8); \
|
||||
return normal_opcode_table[(fetchdat & 0xff) | 0x200](fetchdat >> 8); \
|
||||
} \
|
||||
\
|
||||
static int op##name##_l_a32(uint32_t fetchdat) \
|
||||
{ \
|
||||
fetchdat = fastreadl(cs + cpu_state.pc); \
|
||||
if (cpu_state.abrt) \
|
||||
return 1; \
|
||||
cpu_state.pc++; \
|
||||
\
|
||||
cpu_state.ea_seg = &seg; \
|
||||
cpu_state.ssegs = 1; \
|
||||
CLOCK_CYCLES(4); \
|
||||
PREFETCH_PREFIX(); \
|
||||
\
|
||||
if (opcode_table[(fetchdat & 0xff) | 0x300]) \
|
||||
return opcode_table[(fetchdat & 0xff) | 0x300](fetchdat >> 8); \
|
||||
return normal_opcode_table[(fetchdat & 0xff) | 0x300](fetchdat >> 8); \
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
op_seg(CS, cpu_state.seg_cs, x86_2386_opcodes, x86_2386_opcodes)
|
||||
op_seg(DS, cpu_state.seg_ds, x86_2386_opcodes, x86_2386_opcodes)
|
||||
op_seg(ES, cpu_state.seg_es, x86_2386_opcodes, x86_2386_opcodes)
|
||||
op_seg(FS, cpu_state.seg_fs, x86_2386_opcodes, x86_2386_opcodes)
|
||||
op_seg(GS, cpu_state.seg_gs, x86_2386_opcodes, x86_2386_opcodes)
|
||||
op_seg(SS, cpu_state.seg_ss, x86_2386_opcodes, x86_2386_opcodes)
|
||||
|
||||
op_seg(CS_REPE, cpu_state.seg_cs, x86_2386_opcodes_REPE, x86_2386_opcodes)
|
||||
op_seg(DS_REPE, cpu_state.seg_ds, x86_2386_opcodes_REPE, x86_2386_opcodes)
|
||||
op_seg(ES_REPE, cpu_state.seg_es, x86_2386_opcodes_REPE, x86_2386_opcodes)
|
||||
op_seg(FS_REPE, cpu_state.seg_fs, x86_2386_opcodes_REPE, x86_2386_opcodes)
|
||||
op_seg(GS_REPE, cpu_state.seg_gs, x86_2386_opcodes_REPE, x86_2386_opcodes)
|
||||
op_seg(SS_REPE, cpu_state.seg_ss, x86_2386_opcodes_REPE, x86_2386_opcodes)
|
||||
|
||||
op_seg(CS_REPNE, cpu_state.seg_cs, x86_2386_opcodes_REPNE, x86_2386_opcodes)
|
||||
op_seg(DS_REPNE, cpu_state.seg_ds, x86_2386_opcodes_REPNE, x86_2386_opcodes)
|
||||
op_seg(ES_REPNE, cpu_state.seg_es, x86_2386_opcodes_REPNE, x86_2386_opcodes)
|
||||
op_seg(FS_REPNE, cpu_state.seg_fs, x86_2386_opcodes_REPNE, x86_2386_opcodes)
|
||||
op_seg(GS_REPNE, cpu_state.seg_gs, x86_2386_opcodes_REPNE, x86_2386_opcodes)
|
||||
op_seg(SS_REPNE, cpu_state.seg_ss, x86_2386_opcodes_REPNE, x86_2386_opcodes)
|
||||
// clang-format on
|
||||
|
||||
static int
|
||||
op_66(uint32_t fetchdat) /*Data size select*/
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
cpu_state.op32 = ((use32 & 0x100) ^ 0x100) | (cpu_state.op32 & 0x200);
|
||||
CLOCK_CYCLES(2);
|
||||
PREFETCH_PREFIX();
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
static int
|
||||
op_67(uint32_t fetchdat) /*Address size select*/
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
cpu_state.op32 = ((use32 & 0x200) ^ 0x200) | (cpu_state.op32 & 0x100);
|
||||
CLOCK_CYCLES(2);
|
||||
PREFETCH_PREFIX();
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
|
||||
static int
|
||||
op_66_REPE(uint32_t fetchdat) /*Data size select*/
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
cpu_state.op32 = ((use32 & 0x100) ^ 0x100) | (cpu_state.op32 & 0x200);
|
||||
CLOCK_CYCLES(2);
|
||||
PREFETCH_PREFIX();
|
||||
if (x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32])
|
||||
return x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
static int
|
||||
op_67_REPE(uint32_t fetchdat) /*Address size select*/
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
cpu_state.op32 = ((use32 & 0x200) ^ 0x200) | (cpu_state.op32 & 0x100);
|
||||
CLOCK_CYCLES(2);
|
||||
PREFETCH_PREFIX();
|
||||
if (x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32])
|
||||
return x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
static int
|
||||
op_66_REPNE(uint32_t fetchdat) /*Data size select*/
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
cpu_state.op32 = ((use32 & 0x100) ^ 0x100) | (cpu_state.op32 & 0x200);
|
||||
CLOCK_CYCLES(2);
|
||||
PREFETCH_PREFIX();
|
||||
if (x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32])
|
||||
return x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
|
||||
static int
|
||||
op_67_REPNE(uint32_t fetchdat) /*Address size select*/
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
cpu_state.op32 = ((use32 & 0x200) ^ 0x200) | (cpu_state.op32 & 0x100);
|
||||
CLOCK_CYCLES(2);
|
||||
PREFETCH_PREFIX();
|
||||
if (x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32])
|
||||
return x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
}
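The 0x66/0x67 handlers above fold the override into cpu_state.op32: 0x100 selects a 32-bit operand size and 0x200 a 32-bit address size, each XORed against the segment default in use32 while the other bit is preserved. A small sketch of that toggle, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

static uint32_t
apply_66(uint32_t use32, uint32_t op32) /* operand-size override */
{
    return ((use32 & 0x100) ^ 0x100) | (op32 & 0x200);
}

static uint32_t
apply_67(uint32_t use32, uint32_t op32) /* address-size override */
{
    return ((use32 & 0x200) ^ 0x200) | (op32 & 0x100);
}

int
main(void)
{
    printf("%03x\n", apply_66(0x000, 0x000)); /* 16-bit segment: prefix gives 32-bit operand -> 100 */
    printf("%03x\n", apply_66(0x300, 0x300)); /* 32-bit segment: prefix gives 16-bit operand -> 200 */
    printf("%03x\n", apply_67(0x000, 0x000)); /* 16-bit segment: prefix gives 32-bit address -> 200 */
    return 0;
}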
|
||||
@@ -836,7 +836,7 @@ REP_OPS_CMPS_SCAS(a32_E, ECX, ESI, EDI, 1)
|
||||
static int
|
||||
opREPNE(uint32_t fetchdat)
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
fetchdat = fastreadl_fetch(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
@@ -850,7 +850,7 @@ opREPNE(uint32_t fetchdat)
|
||||
static int
|
||||
opREPE(uint32_t fetchdat)
|
||||
{
|
||||
fetchdat = fastreadl(cs + cpu_state.pc);
|
||||
fetchdat = fastreadl_fetch(cs + cpu_state.pc);
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
cpu_state.pc++;
|
||||
|
||||
@@ -667,7 +667,11 @@ opPOP_SS_w(uint32_t fetchdat)
|
||||
cpu_state.pc++;
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
#ifdef OPS_286_386
|
||||
x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#else
|
||||
x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#endif
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -695,7 +699,11 @@ opPOP_SS_l(uint32_t fetchdat)
|
||||
cpu_state.pc++;
|
||||
if (cpu_state.abrt)
|
||||
return 1;
|
||||
#ifdef OPS_286_386
|
||||
x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#else
|
||||
x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
|
||||
#endif
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -303,6 +303,7 @@ extern int mmu_perm;
|
||||
extern uint8_t high_page; /* if a high (> 4 gb) page was detected */
|
||||
|
||||
extern uint32_t pages_sz; /* #pages in table */
|
||||
extern int read_type;
|
||||
|
||||
extern int mem_a20_state;
|
||||
extern int mem_a20_alt;
|
||||
|
||||
@@ -39,52 +39,43 @@
|
||||
#include <86box/rom.h>
|
||||
#include <86box/gdbstub.h>
|
||||
|
||||
/* Set trap for data address breakpoints. */
|
||||
void
|
||||
mem_debug_check_addr(uint32_t addr, int write)
|
||||
{
|
||||
int i = 0;
|
||||
int set_trap = 0;
|
||||
/* As below, 1 = exec, 4 = read. */
|
||||
int read_type = 4;
|
||||
|
||||
if (!(dr[7] & 0xFF))
|
||||
/* Set trap for data address breakpoints - 1 = exec, 2 = write, 4 = read. */
|
||||
void
|
||||
mem_debug_check_addr(uint32_t addr, int flags)
|
||||
{
|
||||
uint32_t bp_addr;
|
||||
uint32_t bp_mask;
|
||||
uint32_t len_type_pair;
|
||||
int bp_enabled;
|
||||
uint8_t match_flags[4] = { 0, 2, 0, 6 };
|
||||
char *bp_types[5] = { "N/A 0", "EXEC ", "WRITE", "N/A 3", "READ " };
|
||||
|
||||
if (cpu_state.abrt || ((flags == 1) && (cpu_state.eflags & RF_FLAG)))
|
||||
return;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
uint32_t dr_addr = dr[i];
|
||||
int breakpoint_enabled = !!(dr[7] & (0x3 << (2 * i)));
|
||||
int len_type_pair = ((dr[7] >> 16) & (0xF << (4 * i))) >> (4 * i);
|
||||
if (!breakpoint_enabled)
|
||||
continue;
|
||||
if (!write && (len_type_pair & 3) != 3)
|
||||
continue;
|
||||
if ((len_type_pair & 3) != 1)
|
||||
continue;
|
||||
|
||||
switch ((len_type_pair >> 2) & 3)
|
||||
{
|
||||
case 0x00:
|
||||
if (dr_addr == addr) {
|
||||
set_trap = 1;
|
||||
dr[6] |= (1 << i);
|
||||
}
|
||||
break;
|
||||
case 0x01:
|
||||
if ((dr_addr & ~1) == addr || ((dr_addr & ~1) + 1) == (addr + 1)) {
|
||||
set_trap = 1;
|
||||
dr[6] |= (1 << i);
|
||||
}
|
||||
break;
|
||||
case 0x03:
|
||||
dr_addr &= ~3;
|
||||
if (addr >= dr_addr && addr < (dr_addr + 4)) {
|
||||
set_trap = 1;
|
||||
dr[6] |= (1 << i);
|
||||
}
|
||||
break;
|
||||
if (dr[7] & 0x000000ff) for (uint8_t i = 0; i < 4; i++) {
|
||||
bp_addr = dr[i];
|
||||
bp_enabled = (dr[7] >> (i << 1)) & 0x03;
|
||||
len_type_pair = (dr[7] >> (16 + (i << 2))) & 0x0f;
|
||||
bp_mask = ~((len_type_pair >> 2) & 0x03);
|
||||
|
||||
if ((flags & match_flags[len_type_pair & 0x03]) && ((bp_addr & bp_mask) == (addr & bp_mask))) {
|
||||
/*
|
||||
From the Intel i386 documentation:
|
||||
|
||||
(Note that the processor sets Bn regardless of whether Gn or
|
||||
Ln is set. If more than one breakpoint condition occurs at one time and if
|
||||
the breakpoint trap occurs due to an enabled condition other than n, Bn may
|
||||
be set, even though neither Gn nor Ln is set.)
|
||||
*/
|
||||
dr[6] |= (1 << i);
|
||||
if (bp_enabled)
|
||||
trap |= (read_type == 1) ? 8 : 4;
|
||||
}
|
||||
}
|
||||
if (set_trap)
|
||||
trap |= 4;
|
||||
}
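The rewritten check extracts, for each breakpoint i, the enable bits at bit 2*i of DR7 and the 4-bit length/type field at bit 16 + 4*i, whose low two bits give the access type (00 exec, 01 write, 11 read/write) and whose high two bits are turned into an address mask. A standalone sketch of that decode, with hypothetical names rather than the emulator's dr[] array:

#include <stdint.h>
#include <stdio.h>

struct bp_field {
    int      enabled; /* L/G enable bits for breakpoint i */
    unsigned type;    /* 0 = exec, 1 = write, 3 = read/write */
    uint32_t mask;    /* address mask derived from the length bits */
};

static struct bp_field
decode_dr7(uint32_t dr7, int i)
{
    struct bp_field f;
    unsigned len_type = (dr7 >> (16 + (i << 2))) & 0x0f;

    f.enabled = (dr7 >> (i << 1)) & 0x03;
    f.type    = len_type & 0x03;
    f.mask    = ~((len_type >> 2) & 0x03);
    return f;
}

int
main(void)
{
    /* DR0 globally enabled as a 2-byte data-write breakpoint. */
    struct bp_field f = decode_dr7(0x00050002, 0);

    printf("enabled=%d type=%u mask=%08x\n", f.enabled, f.type, (unsigned) f.mask);
    return 0;
}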
|
||||
|
||||
uint8_t
|
||||
@@ -291,7 +282,7 @@ readmembl_2386(uint32_t addr)
|
||||
|
||||
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_READ, 1);
|
||||
|
||||
mem_debug_check_addr(addr, 0);
|
||||
mem_debug_check_addr(addr, read_type);
|
||||
addr64 = (uint64_t) addr;
|
||||
mem_logical_addr = addr;
|
||||
|
||||
@@ -319,7 +310,7 @@ writemembl_2386(uint32_t addr, uint8_t val)
|
||||
mem_mapping_t *map;
|
||||
uint64_t a;
|
||||
|
||||
mem_debug_check_addr(addr, 1);
|
||||
mem_debug_check_addr(addr, 2);
|
||||
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_WRITE, 1);
|
||||
|
||||
addr64 = (uint64_t) addr;
|
||||
@@ -397,8 +388,8 @@ readmemwl_2386(uint32_t addr)
|
||||
|
||||
addr64a[0] = addr;
|
||||
addr64a[1] = addr + 1;
|
||||
mem_debug_check_addr(addr, 0);
|
||||
mem_debug_check_addr(addr + 1, 0);
|
||||
mem_debug_check_addr(addr, read_type);
|
||||
mem_debug_check_addr(addr + 1, read_type);
|
||||
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 2);
|
||||
|
||||
mem_logical_addr = addr;
|
||||
@@ -419,7 +410,8 @@ readmemwl_2386(uint32_t addr)
|
||||
}
|
||||
}
|
||||
|
||||
return readmembl_no_mmut(addr, addr64a[0]) | (((uint16_t) readmembl_no_mmut(addr + 1, addr64a[1])) << 8);
|
||||
return readmembl_no_mmut_2386(addr, addr64a[0]) |
|
||||
(((uint16_t) readmembl_no_mmut_2386(addr + 1, addr64a[1])) << 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -454,8 +446,8 @@ writememwl_2386(uint32_t addr, uint16_t val)
|
||||
|
||||
addr64a[0] = addr;
|
||||
addr64a[1] = addr + 1;
|
||||
mem_debug_check_addr(addr, 1);
|
||||
mem_debug_check_addr(addr + 1, 1);
|
||||
mem_debug_check_addr(addr, 2);
|
||||
mem_debug_check_addr(addr + 1, 2);
|
||||
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 2);
|
||||
|
||||
mem_logical_addr = addr;
|
||||
@@ -482,8 +474,8 @@ writememwl_2386(uint32_t addr, uint16_t val)
|
||||
|
||||
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
|
||||
their result as a parameter to be used if needed. */
|
||||
writemembl_no_mmut(addr, addr64a[0], val);
|
||||
writemembl_no_mmut(addr + 1, addr64a[1], val >> 8);
|
||||
writemembl_no_mmut_2386(addr, addr64a[0], val);
|
||||
writemembl_no_mmut_2386(addr + 1, addr64a[1], val >> 8);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -531,7 +523,8 @@ readmemwl_no_mmut_2386(uint32_t addr, uint32_t *a64)
|
||||
return 0xffff;
|
||||
}
|
||||
|
||||
return readmembl_no_mmut(addr, a64[0]) | (((uint16_t) readmembl_no_mmut(addr + 1, a64[1])) << 8);
|
||||
return readmembl_no_mmut_2386(addr, a64[0]) |
|
||||
(((uint16_t) readmembl_no_mmut_2386(addr + 1, a64[1])) << 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -574,8 +567,8 @@ writememwl_no_mmut_2386(uint32_t addr, uint32_t *a64, uint16_t val)
|
||||
return;
|
||||
}
|
||||
|
||||
writemembl_no_mmut(addr, a64[0], val);
|
||||
writemembl_no_mmut(addr + 1, a64[1], val >> 8);
|
||||
writemembl_no_mmut_2386(addr, a64[0], val);
|
||||
writemembl_no_mmut_2386(addr + 1, a64[1], val >> 8);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -611,7 +604,7 @@ readmemll_2386(uint32_t addr)
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
addr64a[i] = (uint64_t) (addr + i);
|
||||
mem_debug_check_addr(addr + i, 0);
|
||||
mem_debug_check_addr(addr + i, read_type);
|
||||
}
|
||||
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 4);
|
||||
|
||||
@@ -619,8 +612,8 @@ readmemll_2386(uint32_t addr)
|
||||
|
||||
high_page = 0;
|
||||
|
||||
if (addr & 3) {
|
||||
if (!cpu_cyrix_alignment || (addr & 7) > 4)
|
||||
if (cpu_16bitbus || (addr & 3)) {
|
||||
if ((addr & 3) && (!cpu_cyrix_alignment || (addr & 7) > 4))
|
||||
cycles -= timing_misaligned;
|
||||
if ((addr & 0xfff) > 0xffc) {
|
||||
if (cr0 >> 31) {
|
||||
@@ -647,7 +640,8 @@ readmemll_2386(uint32_t addr)
|
||||
|
||||
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
|
||||
their result as a parameter to be used if needed. */
|
||||
return readmemwl_no_mmut(addr, addr64a) | (((uint32_t) readmemwl_no_mmut(addr + 2, &(addr64a[2]))) << 16);
|
||||
return readmemwl_no_mmut_2386(addr, addr64a) |
|
||||
(((uint32_t) readmemwl_no_mmut(addr + 2, &(addr64a[2]))) << 16);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -684,7 +678,7 @@ writememll_2386(uint32_t addr, uint32_t val)
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
addr64a[i] = (uint64_t) (addr + i);
|
||||
mem_debug_check_addr(addr + i, 1);
|
||||
mem_debug_check_addr(addr + i, 2);
|
||||
}
|
||||
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 4);
|
||||
|
||||
@@ -692,8 +686,8 @@ writememll_2386(uint32_t addr, uint32_t val)
|
||||
|
||||
high_page = 0;
|
||||
|
||||
if (addr & 3) {
|
||||
if (!cpu_cyrix_alignment || (addr & 7) > 4)
|
||||
if (cpu_16bitbus || (addr & 3)) {
|
||||
if ((addr & 3) && (!cpu_cyrix_alignment || (addr & 7) > 4))
|
||||
cycles -= timing_misaligned;
|
||||
if ((addr & 0xfff) > 0xffc) {
|
||||
if (cr0 >> 31) {
|
||||
@@ -724,8 +718,8 @@ writememll_2386(uint32_t addr, uint32_t val)
|
||||
|
||||
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
|
||||
their result as a parameter to be used if needed. */
|
||||
writememwl_no_mmut(addr, &(addr64a[0]), val);
|
||||
writememwl_no_mmut(addr + 2, &(addr64a[2]), val >> 16);
|
||||
writememwl_no_mmut_2386(addr, &(addr64a[0]), val);
|
||||
writememwl_no_mmut_2386(addr + 2, &(addr64a[2]), val >> 16);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -770,8 +764,8 @@ readmemll_no_mmut_2386(uint32_t addr, uint32_t *a64)
|
||||
|
||||
mem_logical_addr = addr;
|
||||
|
||||
if (addr & 3) {
|
||||
if (!cpu_cyrix_alignment || (addr & 7) > 4)
|
||||
if (cpu_16bitbus || (addr & 3)) {
|
||||
if ((addr & 3) && (!cpu_cyrix_alignment || (addr & 7) > 4))
|
||||
cycles -= timing_misaligned;
|
||||
if ((addr & 0xfff) > 0xffc) {
|
||||
if (cr0 >> 31) {
|
||||
@@ -779,7 +773,8 @@ readmemll_no_mmut_2386(uint32_t addr, uint32_t *a64)
|
||||
return 0xffffffff;
|
||||
}
|
||||
|
||||
return readmemwl_no_mmut(addr, a64) | ((uint32_t) (readmemwl_no_mmut(addr + 2, &(a64[2]))) << 16);
|
||||
return readmemwl_no_mmut_2386(addr, a64) |
|
||||
((uint32_t) (readmemwl_no_mmut_2386(addr + 2, &(a64[2]))) << 16);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -815,8 +810,8 @@ writememll_no_mmut_2386(uint32_t addr, uint32_t *a64, uint32_t val)
|
||||
|
||||
mem_logical_addr = addr;
|
||||
|
||||
if (addr & 3) {
|
||||
if (!cpu_cyrix_alignment || (addr & 7) > 4)
|
||||
if (cpu_16bitbus || (addr & 3)) {
|
||||
if ((addr & 3) && (!cpu_cyrix_alignment || (addr & 7) > 4))
|
||||
cycles -= timing_misaligned;
|
||||
if ((addr & 0xfff) > 0xffc) {
|
||||
if (cr0 >> 31) {
|
||||
@@ -824,8 +819,8 @@ writememll_no_mmut_2386(uint32_t addr, uint32_t *a64, uint32_t val)
|
||||
return;
|
||||
}
|
||||
|
||||
writememwl_no_mmut(addr, &(a64[0]), val);
|
||||
writememwl_no_mmut(addr + 2, &(a64[2]), val >> 16);
|
||||
writememwl_no_mmut_2386(addr, &(a64[0]), val);
|
||||
writememwl_no_mmut_2386(addr + 2, &(a64[2]), val >> 16);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -867,7 +862,7 @@ readmemql_2386(uint32_t addr)
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
addr64a[i] = (uint64_t) (addr + i);
|
||||
mem_debug_check_addr(addr + i, 0);
|
||||
mem_debug_check_addr(addr + i, read_type);
|
||||
}
|
||||
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 8);
|
||||
|
||||
@@ -902,7 +897,8 @@ readmemql_2386(uint32_t addr)
|
||||
|
||||
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
|
||||
their result as a parameter to be used if needed. */
|
||||
return readmemll_no_mmut(addr, addr64a) | (((uint64_t) readmemll_no_mmut(addr + 4, &(addr64a[4]))) << 32);
|
||||
return readmemll_no_mmut_2386(addr, addr64a) |
|
||||
(((uint64_t) readmemll_no_mmut_2386(addr + 4, &(addr64a[4]))) << 32);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -932,7 +928,7 @@ writememql_2386(uint32_t addr, uint64_t val)
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
addr64a[i] = (uint64_t) (addr + i);
|
||||
mem_debug_check_addr(addr + i, 1);
|
||||
mem_debug_check_addr(addr + i, 2);
|
||||
}
|
||||
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 8);
|
||||
|
||||
@@ -971,8 +967,8 @@ writememql_2386(uint32_t addr, uint64_t val)
|
||||
|
||||
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
|
||||
their result as a parameter to be used if needed. */
|
||||
writememll_no_mmut(addr, addr64a, val);
|
||||
writememll_no_mmut(addr + 4, &(addr64a[4]), val >> 32);
|
||||
writememll_no_mmut_2386(addr, addr64a, val);
|
||||
writememll_no_mmut_2386(addr + 4, &(addr64a[4]), val >> 32);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -1019,7 +1015,7 @@ do_mmutranslate_2386(uint32_t addr, uint32_t *a64, int num, int write)
|
||||
uint32_t last_addr = addr + (num - 1);
|
||||
uint64_t a = 0x0000000000000000ULL;
|
||||
|
||||
mem_debug_check_addr(addr, write);
|
||||
mem_debug_check_addr(addr, write ? 2 : read_type);
|
||||
|
||||
for (i = 0; i < num; i++)
|
||||
a64[i] = (uint64_t) addr;
|
||||
|
||||
@@ -296,6 +296,12 @@ rom_load_linear_inverted(const char *fn, uint32_t addr, int sz, int off, uint8_t
|
||||
fatal("rom_load_linear_inverted(): Error reading the upper half of the data\n");
|
||||
if (fread(ptr + addr, sz >> 1, 1, fp) > (sz >> 1))
|
||||
fatal("rom_load_linear_inverted(): Error reading the lower half of the data\n");
|
||||
if (sz == 0x40000) {
|
||||
if (fread(ptr + addr + 0x30000, 1, sz >> 1, fp) > (sz >> 1))
|
||||
fatal("rom_load_linear_inverted(): Error reading the upper half of the data\n");
|
||||
if (fread(ptr + addr + 0x20000, sz >> 1, 1, fp) > (sz >> 1))
|
||||
fatal("rom_load_linear_inverted(): Error reading the lower half of the data\n");
|
||||
}
|
||||
}
|
||||
|
||||
(void) fclose(fp);
|
||||
|
||||