RichardG867
2021-04-03 00:17:09 -03:00
8 changed files with 181 additions and 183 deletions

View File

@@ -33,6 +33,7 @@
#include <86box/port_92.h>
#include <86box/chipset.h>
#ifdef ENABLE_ALI1217_LOG
int ali1217_do_log = ENABLE_ALI1217_LOG;
static void
@@ -57,18 +58,12 @@ typedef struct
int cfg_locked;
} ali1217_t;
static void ali1217_shadow_recalc(ali1217_t *dev)
static void ali1217_shadow_recalc(int reg_15, ali1217_t *dev)
{
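/* reg_15 selects which shadow register to apply: 0 recalculates the
   C0000-DFFFF range from register 0x14, 1 recalculates the E0000-FFFFF
   range from register 0x15. */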
for (uint8_t i = 0; i < 4; i++)
{
mem_set_mem_state_both(0xc0000 + (i << 15), 0x8000, ((dev->regs[0x14] & (1 << (i * 2))) ? MEM_READ_INTERNAL : MEM_READ_EXTANY) | ((dev->regs[0x14] & (1 << ((i * 2) + 1))) ? MEM_WRITE_INTERNAL : MEM_WRITE_EXTANY));
mem_set_mem_state_both(0xe0000 + (i << 15), 0x8000, ((dev->regs[0x15] & (1 << (i * 2))) ? MEM_READ_INTERNAL : MEM_READ_EXTANY) | ((dev->regs[0x15] & (1 << ((i * 2) + 1))) ? MEM_WRITE_INTERNAL : MEM_WRITE_EXTANY));
}
mem_set_mem_state_both((reg_15 ? 0xe0000 : 0xc0000) + (i << 15), 0x8000, ((dev->regs[0x14 + reg_15] & (1 << (i * 2))) ? MEM_READ_INTERNAL : MEM_READ_EXTANY) | ((dev->regs[0x14 + reg_15] & (1 << ((i * 2) + 1))) ? MEM_WRITE_INTERNAL : MEM_WRITE_EXTANY));
shadowbios = !!(dev->regs[0x15] & 5);
shadowbios_write = !!(dev->regs[0x15] & 0x0a);
flushmmucache();
flushmmucache_nopc();
}
static void
@@ -84,8 +79,7 @@ ali1217_write(uint16_t addr, uint8_t val, void *priv)
case 0x23:
if (dev->index != 0x13)
ali1217_log("ALi M1217: dev->regs[%02x] = %02x\n", dev->index, val);
if (dev->index == 0x13)
else
dev->cfg_locked = !(val == 0xc5);
if (!dev->cfg_locked)
@@ -93,7 +87,7 @@ ali1217_write(uint16_t addr, uint8_t val, void *priv)
dev->regs[dev->index] = val;
if ((dev->index == 0x14) || (dev->index == 0x15))
ali1217_shadow_recalc(dev);
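/* Bit 0 of the index distinguishes register 0x14 (reg_15 = 0) from 0x15 (reg_15 = 1). */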
ali1217_shadow_recalc(dev->index & 1, dev);
}
break;
}
@@ -104,7 +98,7 @@ ali1217_read(uint16_t addr, void *priv)
{
ali1217_t *dev = (ali1217_t *)priv;
return !(addr == 0x22) ? dev->regs[dev->index] : dev->index;
return (addr == 0x23) ? dev->regs[dev->index] : 0xff;
}
static void
@@ -134,7 +128,6 @@ ali1217_init(const device_t *info)
*/
io_sethandler(0x0022, 0x0002, ali1217_read, NULL, NULL, ali1217_write, NULL, NULL, dev);
ali1217_shadow_recalc(dev);
return dev;
}

View File

@@ -141,6 +141,8 @@ void hb4_shadow(int cur_addr, hb4_t *dev)
mem_set_mem_state_both(0xc8000 + ((i - 2) << 14), 0x4000, (dev->pci_conf[0x54] & (1 << i)) ? (CAN_READ | CAN_WRITE) : DISABLE);
mem_set_mem_state_both(0xe0000, 0x20000, CAN_READ | CAN_WRITE);
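/* The 128 KB at E0000-FFFFF (0x20000 bytes) are set readable and writable here. */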
flushmmucache_nopc();
}
static void

View File

@@ -970,7 +970,7 @@ const cpu_family_t cpu_families[] = {
}, {
.package = CPU_PKG_EBGA368,
.manufacturer = "VIA",
.name = "Eden(Model 7)",
.name = "Eden Model 7",
.internal_name = "c3_eden",
.cpus = (const CPU[]) {
{"100", CPU_EDEN, fpus_internal, 100000000, 1.5, 2050, 0x673, 0x673, 0, CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER, 9, 9, 4, 4, 12}, /* out of spec */

View File

@@ -117,6 +117,6 @@ void sb_dsp_poll(sb_dsp_t *dsp, int16_t *l, int16_t *r);
void sb_dsp_set_stereo(sb_dsp_t *dsp, int stereo);
void sb_dsp_update(sb_dsp_t *dsp);
void sb_update_irq(sb_dsp_t *dsp);
void sb_update_mask(sb_dsp_t *dsp, int irqm8, int irqm16, int irqm401);
#endif /* SOUND_SND_SB_DSP_H */

View File

@@ -793,6 +793,7 @@ writemembl(uint32_t addr, uint8_t val)
{
uint64_t addr64 = (uint64_t) addr;
mem_mapping_t *map;
mem_logical_addr = addr;
if (page_lookup[addr>>12] && page_lookup[addr>>12]->write_b) {
@@ -842,6 +843,7 @@ void
writemembl_no_mmut(uint32_t addr, uint64_t addr64, uint8_t val)
{
mem_mapping_t *map;
mem_logical_addr = addr;
if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_b) {
@@ -869,8 +871,6 @@ readmemwl(uint32_t addr)
uint64_t addr64[2];
mem_mapping_t *map;
int i;
uint16_t ret = 0x0000;
uint32_t prev_page = 0xffffffff;
addr64[0] = (uint64_t) addr;
addr64[1] = (uint64_t) (addr + 1);
@@ -883,25 +883,15 @@ readmemwl(uint32_t addr)
if ((addr & 0xfff) > 0xffe) {
if (cr0 >> 31) {
for (i = 0; i < 2; i++) {
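/* A word read at page offset 0xfff always has its two bytes on different
   pages, so each byte gets its own translation. */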
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslate_read(addr + i);
prev_page = ((addr + i) & ~0xfff);
addr64[i] = mmutranslate_read(addr + i);
if (addr64[i] > 0xffffffffULL)
return 0xffff;
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
for (i = 0; i < 2; i++)
ret |= (readmembl_no_mmut(addr + i, addr64[i]) << (i << 3));
return ret;
return readmembl_no_mmut(addr, addr64[0]) |
(((uint16_t) readmembl_no_mmut(addr + 1, addr64[1])) << 8);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
return *(uint16_t *)(readlookup2[addr >> 12] + addr);
}
@@ -935,7 +925,6 @@ writememwl(uint32_t addr, uint16_t val)
uint64_t addr64[2];
mem_mapping_t *map;
int i;
uint32_t prev_page = 0xffffffff;
addr64[0] = (uint64_t) addr;
addr64[1] = (uint64_t) (addr + 1);
@@ -951,26 +940,18 @@ writememwl(uint32_t addr, uint16_t val)
/* Do not translate a page that has a valid lookup, as that is by definition valid
and the whole purpose of the lookup is to avoid repeat identical translations. */
if (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b) {
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslate_write(addr + i);
prev_page = ((addr + i) & ~0xfff);
addr64[i] = mmutranslate_write(addr + i);
if (addr64[i] > 0xffffffffULL)
return;
} else
prev_page = 0xffffffff;
}
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
for (i = 0; i < 2; i++)
writemembl_no_mmut(addr + i, addr64[i], val >> (i << 3));
writemembl_no_mmut(addr, addr64[0], val);
writemembl_no_mmut(addr + 1, addr64[1], val >> 8);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
*(uint16_t *)(writelookup2[addr >> 12] + addr) = val;
@@ -1013,7 +994,6 @@ readmemwl_no_mmut(uint32_t addr, uint64_t *addr64)
{
mem_mapping_t *map;
int i;
uint16_t ret = 0x0000;
mem_logical_addr = addr;
@@ -1021,14 +1001,15 @@ readmemwl_no_mmut(uint32_t addr, uint64_t *addr64)
if (!cpu_cyrix_alignment || (addr & 7) == 7)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffe) {
for (i = 0; i < 2; i++) {
if ((cr0 >> 31) && (addr64[i] > 0xffffffffULL))
return 0xffff;
ret |= (readmembl_no_mmut(addr + i, addr64[i]) << (i << 3));
if (cr0 >> 31) {
for (i = 0; i < 2; i++) {
if (addr64[i] > 0xffffffffULL)
return 0xffff;
}
}
return ret;
return readmembl_no_mmut(addr, addr64[0]) |
(((uint16_t) readmembl_no_mmut(addr + 1, addr64[1])) << 8);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
return *(uint16_t *)(readlookup2[addr >> 12] + addr);
}
@@ -1068,13 +1049,15 @@ writememwl_no_mmut(uint32_t addr, uint64_t *addr64, uint16_t val)
if (!cpu_cyrix_alignment || (addr & 7) == 7)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffe) {
for (i = 0; i < 2; i++) {
if ((cr0 >> 31) && (addr64[i] > 0xffffffffULL))
return;
writemembl_no_mmut(addr + i, addr64[i], val >> (i << 3));
if (cr0 >> 31) {
for (i = 0; i < 2; i++) {
if (addr64[i] > 0xffffffffULL)
return;
}
}
writemembl_no_mmut(addr, addr64[0], val);
writemembl_no_mmut(addr + 1, addr64[1], val >> 8);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
*(uint16_t *)(writelookup2[addr >> 12] + addr) = val;
@@ -1116,9 +1099,7 @@ readmemll(uint32_t addr)
{
uint64_t addr64[4];
mem_mapping_t *map;
int i;
uint32_t ret = 0x00000000;
uint32_t prev_page = 0xffffffff;
int i, wrap_i = 1;
for (i = 0; i < 4; i++)
addr64[i] = (uint64_t) (addr + i);
@@ -1130,26 +1111,31 @@ readmemll(uint32_t addr)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffc) {
if (cr0 >> 31) {
for (i = 0; i < 4; i++) {
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslate_read(addr + i);
wrap_i = 4 - (addr & 0x3);
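/* wrap_i is the number of bytes of this dword that still fit on the
   starting page; bytes below it reuse the first translation, bytes from
   wrap_i onwards reuse the translation of the second page. */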
prev_page = ((addr + i) & ~0xfff);
addr64[0] = mmutranslate_read(addr);
if (addr64[i] > 0xffffffffULL)
return 0xffff;
if (addr64[0] > 0xffffffffULL)
return 0xffffffff;
for (i = 1; i < wrap_i; i++)
addr64[i] = (addr64[0] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
addr64[wrap_i] = mmutranslate_read(addr + wrap_i);
if (addr64[wrap_i] > 0xffffffffULL)
return 0xffffffff;
if (wrap_i != 3) {
for (i = (wrap_i) + 1; i <= 3; i++)
addr64[i] = (addr64[wrap_i] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
for (i = 0; i < 4; i += 2)
ret |= (readmemwl_no_mmut(addr + i, &(addr64[i])) << (i << 3));
return ret;
return readmemwl_no_mmut(addr, addr64) |
(((uint32_t) readmemwl_no_mmut(addr + 2, &(addr64[2]))) << 16);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
return *(uint32_t *)(readlookup2[addr >> 12] + addr);
}
@@ -1187,8 +1173,7 @@ writememll(uint32_t addr, uint32_t val)
{
uint64_t addr64[4];
mem_mapping_t *map;
int i;
uint32_t prev_page = 0xffffffff;
int i, wrap_i = 1;
for (i = 0; i < 4; i++)
addr64[i] = (uint64_t) (addr + i);
@@ -1200,30 +1185,37 @@ writememll(uint32_t addr, uint32_t val)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffc) {
if (cr0 >> 31) {
for (i = 0; i < 4; i++) {
/* Do not translate a page that has a valid lookup, as that is by definition valid
and the whole purpose of the lookup is to avoid repeat identical translations. */
if (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b) {
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslate_write(addr + i);
wrap_i = 4 - (addr & 0x3);
prev_page = ((addr + i) & ~0xfff);
if (!page_lookup[(addr + 0) >> 12] || !page_lookup[(addr + 0) >> 12]->write_b)
addr64[0] = mmutranslate_write(addr);
else
addr64[0] = 0xffffffff;
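/* 0xffffffff is a harmless placeholder: pages with a valid write lookup
   are written through page_lookup in writemembl_no_mmut, so the
   translated address is never used for them. */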
if (addr64[i] > 0xffffffffULL)
return;
} else
prev_page = 0xffffffff;
if (addr64[0] > 0xffffffffULL)
return;
for (i = 1; i < wrap_i; i++)
addr64[i] = (addr64[0] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
if (!page_lookup[(addr + wrap_i) >> 12] || !page_lookup[(addr + wrap_i) >> 12]->write_b)
addr64[wrap_i] = mmutranslate_write(addr + wrap_i);
else
addr64[wrap_i] = 0xffffffff;
if (addr64[wrap_i] > 0xffffffffULL)
return;
if (wrap_i != 3) {
for (i = (wrap_i) + 1; i <= 3; i++)
addr64[i] = (addr64[wrap_i] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
for (i = 0; i < 4; i += 2)
writememwl_no_mmut(addr + i, &(addr64[i]), val >> (i << 3));
writememwl_no_mmut(addr, &(addr64[0]), val);
writememwl_no_mmut(addr + 2, &(addr64[2]), val >> 16);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
*(uint32_t *)(writelookup2[addr >> 12] + addr) = val;
@@ -1272,7 +1264,6 @@ readmemll_no_mmut(uint32_t addr, uint64_t *addr64)
{
mem_mapping_t *map;
int i;
uint32_t ret = 0x00000000;
mem_logical_addr = addr;
@@ -1280,14 +1271,15 @@ readmemll_no_mmut(uint32_t addr, uint64_t *addr64)
if (!cpu_cyrix_alignment || (addr & 7) > 4)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffc) {
for (i = 0; i < 4; i += 2) {
if ((cr0 >> 31) && (addr64[i] > 0xffffffffULL))
return 0xffffffff;
ret |= (readmemwl_no_mmut(addr + i, &(addr64[i])) << (i << 3));
if (cr0 >> 31) {
for (i = 0; i < 4; i += 2) {
if (addr64[i] > 0xffffffffULL)
return 0xffffffff;
}
}
return ret;
return readmemwl_no_mmut(addr, addr64) |
(((uint32_t) readmemwl_no_mmut(addr + 2, &(addr64[2]))) << 16);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
return *(uint32_t *)(readlookup2[addr >> 12] + addr);
}
@@ -1332,13 +1324,15 @@ writememll_no_mmut(uint32_t addr, uint64_t *addr64, uint32_t val)
if (!cpu_cyrix_alignment || (addr & 7) > 4)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffc) {
for (i = 0; i < 4; i += 2) {
if ((cr0 >> 31) && (addr64[i] > 0xffffffffULL))
return;
writememwl_no_mmut(addr + i, &(addr64[i]), val >> (i << 3));
if (cr0 >> 31) {
for (i = 0; i < 4; i += 2) {
if (addr64[i] > 0xffffffffULL)
return;
}
}
writememwl_no_mmut(addr, &(addr64[0]), val);
writememwl_no_mmut(addr + 2, &(addr64[2]), val >> 16);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
*(uint32_t *)(writelookup2[addr >> 12] + addr) = val;
@@ -1385,9 +1379,7 @@ readmemql(uint32_t addr)
{
uint64_t addr64[8];
mem_mapping_t *map;
int i;
uint64_t ret = 0x0000000000000000ULL;
uint32_t prev_page = 0xffffffff;
int i, wrap_i = 1;
for (i = 0; i < 8; i++)
addr64[i] = (uint64_t) (addr + i);
@@ -1398,26 +1390,30 @@ readmemql(uint32_t addr)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xff8) {
if (cr0 >> 31) {
for (i = 0; i < 8; i++) {
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslate_read(addr + i);
wrap_i = 8 - (addr & 0x7);
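/* Same page-split scheme as readmemll, extended to the eight bytes of a qword. */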
prev_page = ((addr + i) & ~0xfff);
addr64[0] = mmutranslate_read(addr);
if (addr64[i] > 0xffffffffULL)
return 0xffff;
if (addr64[0] > 0xffffffffULL)
return 0xffffffffffffffffULL;
for (i = 1; i < wrap_i; i++)
addr64[i] = (addr64[0] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
addr64[wrap_i] = mmutranslate_read(addr + wrap_i);
if (addr64[wrap_i] > 0xffffffffULL)
return 0xffffffffffffffffULL;
if (wrap_i != 7) {
for (i = (wrap_i) + 1; i <= 7; i++)
addr64[i] = (addr64[wrap_i] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
for (i = 0; i < 8; i += 4)
ret |= (readmemll_no_mmut(addr + i, &(addr64[i])) << (i << 3));
return ret;
return readmemll_no_mmut(addr, addr64) |
(((uint64_t) readmemll_no_mmut(addr + 4, &(addr64[4]))) << 32);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
return *(uint64_t *)(readlookup2[addr >> 12] + addr);
}
@@ -1444,8 +1440,7 @@ writememql(uint32_t addr, uint64_t val)
{
uint64_t addr64[8];
mem_mapping_t *map;
int i;
uint32_t prev_page = 0xffffffff;
int i, wrap_i;
for (i = 0; i < 8; i++)
addr64[i] = (uint64_t) (addr + i);
@@ -1456,30 +1451,37 @@ writememql(uint32_t addr, uint64_t val)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xff8) {
if (cr0 >> 31) {
for (i = 0; i < 8; i++) {
/* Do not translate a page that has a valid lookup, as that is by definition valid
and the whole purpose of the lookup is to avoid repeat identical translations. */
if (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b) {
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslate_write(addr + i);
wrap_i = 8 - (addr & 0x7);
prev_page = ((addr + i) & ~0xfff);
if (!page_lookup[(addr + 0) >> 12] || !page_lookup[(addr + 0) >> 12]->write_b)
addr64[0] = mmutranslate_write(addr);
else
addr64[0] = 0xffffffff;
if (addr64[i] > 0xffffffffULL)
return;
} else
prev_page = 0xffffffff;
if (addr64[0] > 0xffffffffULL)
return;
for (i = 1; i < wrap_i; i++)
addr64[i] = (addr64[0] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
if (!page_lookup[(addr + wrap_i) >> 12] || !page_lookup[(addr + wrap_i) >> 12]->write_b)
addr64[wrap_i] = mmutranslate_write(addr + wrap_i);
else
addr64[wrap_i] = 0xffffffff;
if (addr64[wrap_i] > 0xffffffffULL)
return;
if (wrap_i != 7) {
for (i = (wrap_i) + 1; i <= 7; i++)
addr64[i] = (addr64[wrap_i] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
for (i = 0; i < 8; i += 4)
writememll_no_mmut(addr + i, &(addr64[i]), val >> (i << 3));
writememll_no_mmut(addr, &(addr64[0]), val);
writememll_no_mmut(addr + 4, &(addr64[4]), val >> 32);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
*(uint64_t *)(writelookup2[addr >> 12] + addr) = val;
@@ -1533,36 +1535,29 @@ writememql(uint32_t addr, uint64_t val)
void
do_mmutranslate(uint32_t addr, uint64_t *addr64, int num, int write)
{
int i, cond = 1;
uint32_t prev_page = 0xffffffff;
int i, wrap_i;
int cond = 1;
for (i = 0; i < num; i++) {
addr64[i] = (uint64_t) (addr + i);
if (cr0 >> 31) {
wrap_i = (num - (addr & (num - 1))) & (num - 1);
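/* wrap_i is the offset of the first byte that may land on the next page
   (0 if the access is naturally aligned). A fresh translation is done at
   i == 0 and at i == wrap_i; all other bytes reuse the previous byte's
   translation with only the page offset replaced. */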
if (cr0 >> 31) {
/* Do not translate a page that has a valid lookup, as that is by definition valid
and the whole purpose of the lookup is to avoid repeat identical translations. */
if (write)
cond = (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b);
for (i = 0; i < num; i++) {
if (write && ((i == 0) || (wrap_i && (i == wrap_i))))
cond = (!page_lookup[addr >> 12] || !page_lookup[addr >> 12]->write_b);
if (cond) {
/* If we are on the same page, there is no need to translate again, as we can just
reuse the previous result. */
if ((i > 0) && (((addr + i) & ~0xfff) == prev_page))
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
else
addr64[i] = mmutranslatereal(addr + i, write);
if (((i == 0) || (wrap_i && (i == wrap_i)))) {
addr64[i] = mmutranslatereal(addr, write);
prev_page = ((addr + i) & ~0xfff);
if (addr64[i] > 0xffffffffULL)
return;
} else
addr64[i] = (addr64[i - 1] & ~0xfffLL) | ((uint64_t) (addr & 0xfff));
}
if (addr64[i] == 0xffffffffffffffffULL)
return;
if (addr64[i] > 0xffffffffULL)
return;
if (cpu_state.abrt)
return;
} else
prev_page = 0xffffffff;
addr++;
}
}
}

View File

@@ -858,10 +858,7 @@ sb_ct1745_mixer_write(uint16_t addr, uint8_t val, void *p)
case 0x83:
/* Interrupt mask. */
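/* A cleared bit masks the corresponding source: bit 0 the 8-bit DSP
   interrupt, bit 1 the 16-bit DSP interrupt, bit 2 the MPU-401 interrupt. */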
sb->dsp.sb_irqm8 = !(val & 0x01);
sb->dsp.sb_irqm16 = !(val & 0x02);
sb->dsp.sb_irqm401 = !(val & 0x04);
sb_update_irq(&sb->dsp);
sb_update_mask(&sb->dsp, !(val & 0x01), !(val & 0x02), !(val & 0x04));
break;
case 0x84:

View File

@@ -178,17 +178,21 @@ recalc_sb16_filter(int c, int playback_freq)
void
sb_update_irq(sb_dsp_t *dsp)
sb_update_mask(sb_dsp_t *dsp, int irqm8, int irqm16, int irqm401)
{
int irq_pending;
int clear = 0;
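/* The IRQ line is lowered only when a source that was previously unmasked
   becomes masked; raising the line is left to sb_update_status(). */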
irq_pending = (dsp->sb_irq8 && !dsp->sb_irqm8) ||
(dsp->sb_irq16 && !dsp->sb_irqm16) ||
(dsp->sb_irq401 && !dsp->sb_irqm401);
if (!dsp->sb_irqm8 && irqm8)
clear |= 1;
dsp->sb_irqm8 = irqm8;
if (!dsp->sb_irqm16 && irqm16)
clear |= 1;
dsp->sb_irqm16 = irqm16;
if (!dsp->sb_irqm401 && irqm401)
clear |= 1;
dsp->sb_irqm401 = irqm401;
if (irq_pending)
picint(1 << dsp->sb_irqnum);
else
if (clear)
picintc(1 << dsp->sb_irqnum);
}
@@ -196,20 +200,28 @@ sb_update_irq(sb_dsp_t *dsp)
void
sb_update_status(sb_dsp_t *dsp, int bit, int set)
{
int masked = 0;
switch (bit) {
case 0:
default:
dsp->sb_irq8 = set;
masked = dsp->sb_irqm8;
break;
case 1:
dsp->sb_irq16 = set;
masked = dsp->sb_irqm16;
break;
case 2:
dsp->sb_irq401 = set;
masked = dsp->sb_irqm401;
break;
}
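/* Raise the line only for an unmasked source being set; clearing a source
   always lowers the line. Mask changes themselves are handled in sb_update_mask(). */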
sb_update_irq(dsp);
if (set && !masked)
picint(1 << dsp->sb_irqnum);
else if (!set)
picintc(1 << dsp->sb_irqnum);
}
@@ -258,8 +270,8 @@ sb_dsp_set_mpu(sb_dsp_t *dsp, mpu_t *mpu)
void
sb_dsp_reset(sb_dsp_t *dsp)
{
midi_clear_buffer();
timer_disable(&dsp->output_timer);
timer_disable(&dsp->input_timer);
@@ -271,9 +283,8 @@ sb_dsp_reset(sb_dsp_t *dsp)
dsp->sb_irq8 = 0;
dsp->sb_irq16 = 0;
dsp->sb_irq401 = 0;
sb_update_irq(dsp);
dsp->sb_16_pause = 0;
dsp->sb_read_wp = dsp->sb_read_rp = 0;
dsp->sb_data_stat = -1;
dsp->sb_speaker = 0;
dsp->sb_pausetime = -1LL;