Rewrote the disc sector poller again, simplifying the code and improving its accuracy;

Floppy formats are now timed and implemented more accurately;
Applied all mainline PCem commits.
OBattler
2016-08-20 03:40:12 +02:00
parent 24a6ab13fb
commit a924f37f43
51 changed files with 2471 additions and 1302 deletions
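
For context on the timing claims in the commit message above: an accurately timed floppy poller generally derives its per-sector interval from the medium's rotation speed and sector count rather than polling at an arbitrary fixed rate. The sketch below is not the code from this commit, only a minimal illustration of that idea; every name in it (disc_format_t, poller_interval_us, disc_poll_sector) is hypothetical.

#include <stdint.h>

/* Hypothetical per-format geometry; these names are illustrative and do not
   come from the PCem/86Box sources. */
typedef struct {
    int rpm;                /* spindle speed, e.g. 300 for a 3.5" 1.44MB disc */
    int sectors_per_track;  /* e.g. 18 for a 1.44MB disc */
} disc_format_t;

/* Microseconds between successive sectors passing under the head:
   one revolution lasts 60,000,000 / rpm us, split evenly across the track. */
static uint32_t poller_interval_us(const disc_format_t *fmt)
{
    return (uint32_t)(60000000ULL / ((uint64_t)fmt->rpm * (uint64_t)fmt->sectors_per_track));
}

/* Called once per interval by the emulator's timer core (hypothetical hook):
   advance to the next sector and hand it to the FDC state machine. */
static void disc_poll_sector(int *cur_sector, const disc_format_t *fmt)
{
    *cur_sector = (*cur_sector + 1) % fmt->sectors_per_track;
    /* ...fetch the sector's data from the image and signal the FDC here... */
}

For a 1.44MB 3.5" format (300 RPM, 18 sectors per track) this works out to one 200 ms revolution split into intervals of roughly 11.1 ms per sector.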


@@ -1,6 +1,3 @@
-/* Copyright holders: Sarah Walker
-see COPYING for more details
-*/
#define ROP_LOGIC(name, op, writeback) \
static uint32_t rop ## name ## _b_rmw(uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc, codeblock_t *block) \
{ \
@@ -67,7 +64,7 @@
else \
{ \
x86seg *target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32); \
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc); \
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc); \
MEM_LOAD_ADDR_EA_B(target_seg); \
src_reg = 0; \
} \
@@ -93,7 +90,7 @@
else \
{ \
x86seg *target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32); \
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc); \
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc); \
MEM_LOAD_ADDR_EA_W(target_seg); \
src_reg = 0; \
} \
@@ -119,7 +116,7 @@
else \
{ \
x86seg *target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32); \
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc); \
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc); \
MEM_LOAD_ADDR_EA_L(target_seg); \
src_reg = 0; \
} \
@@ -150,7 +147,7 @@ static uint32_t ropTEST_b_rm(uint8_t opcode, uint32_t fetchdat, uint32_t op_32,
else
{
x86seg *target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32);
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc);
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc);
MEM_LOAD_ADDR_EA_B(target_seg);
src_reg = 0;
}
@@ -175,7 +172,7 @@ static uint32_t ropTEST_w_rm(uint8_t opcode, uint32_t fetchdat, uint32_t op_32,
else
{
x86seg *target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32);
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc);
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc);
MEM_LOAD_ADDR_EA_W(target_seg);
src_reg = 0;
}
@@ -200,7 +197,7 @@ static uint32_t ropTEST_l_rm(uint8_t opcode, uint32_t fetchdat, uint32_t op_32,
else
{
x86seg *target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32);
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc);
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc);
MEM_LOAD_ADDR_EA_L(target_seg);
src_reg = 0;
}
@@ -373,7 +370,7 @@ static uint32_t ropF6(uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_
{
target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32);
imm = fastreadb(cs + op_pc + 1);
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc);
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc);
MEM_LOAD_ADDR_EA_B(target_seg);
host_reg = 0;
}
@@ -424,7 +421,7 @@ static uint32_t ropF7_w(uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint3
{
target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32);
imm = fastreadw(cs + op_pc + 1);
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc);
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc);
MEM_LOAD_ADDR_EA_W(target_seg);
host_reg = 0;
}
@@ -475,7 +472,7 @@ static uint32_t ropF7_l(uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint3
{
target_seg = FETCH_EA(op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32);
imm = fastreadl(cs + op_pc + 1);
-STORE_IMM_ADDR_L((uintptr_t)&oldpc, op_old_pc);
+STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc);
MEM_LOAD_ADDR_EA_L(target_seg);
host_reg = 0;
}
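
The change repeated throughout the hunks above swaps references to the bare global oldpc for the cpu_state.oldpc member, which appears to be part of the mainline PCem work of gathering per-CPU globals into the cpu_state structure. A schematic before/after of that pattern, not the actual PCem declarations, looks like this:

#include <stdint.h>

/* Before (schematic): the recompiler stored the pre-instruction PC through a
   free-standing global. */
uint32_t oldpc;

/* After (schematic): the field lives inside the CPU state structure, so the
   generated store goes through one well-known object. */
typedef struct {
    uint32_t oldpc;
    /* ...other per-CPU fields... */
} cpu_state_t;

cpu_state_t cpu_state;

/* The emitted store then targets the struct member, as in the hunks above:
   STORE_IMM_ADDR_L((uintptr_t)&cpu_state.oldpc, op_old_pc); */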