/* 86Box/src/mem/mem.c */
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* Memory handling and MMU.
*
* Authors: Sarah Walker, <tommowalker@tommowalker.co.uk>
* Miran Grca, <mgrca8@gmail.com>
* Fred N. van Kempen, <decwiz@yahoo.com>
*
* Copyright 2008-2020 Sarah Walker.
* Copyright 2016-2020 Miran Grca.
* Copyright 2017-2020 Fred N. van Kempen.
*/
#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <wchar.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include <86box/version.h>
#include "cpu.h"
#include "x86_ops.h"
#include "x86.h"
#include <86box/machine.h>
#include <86box/m_xt_xi8088.h>
#include <86box/config.h>
#include <86box/io.h>
#include <86box/mem.h>
#include <86box/plat.h>
#include <86box/rom.h>
/* (stray VCS timestamp from web extraction removed) */
#include <86box/gdbstub.h>
#ifdef USE_DYNAREC
/* (stray VCS timestamp from web extraction removed) */
# include "codegen_public.h"
#else
/* (stray VCS timestamp from web extraction removed) */
# ifdef USE_NEW_DYNAREC
# define PAGE_MASK_SHIFT 6
# else
# define PAGE_MASK_INDEX_MASK 3
# define PAGE_MASK_INDEX_SHIFT 10
# define PAGE_MASK_SHIFT 4
# endif
# define PAGE_MASK_MASK 63
#endif
#if (!defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
/* (stray VCS timestamp from web extraction removed) */
# define BLOCK_PC_INVALID 0xffffffff
# define BLOCK_INVALID 0
#endif
/* (stray VCS timestamp from web extraction removed) */
/* RAM/ROM mappings registered with the memory system. */
mem_mapping_t ram_low_mapping,       /* 0..640K mapping */
              ram_mid_mapping,       /* 640..1024K mapping */
              ram_mid_mapping2,      /* 640..1024K mapping, second part, for SiS 471 in relocate mode */
              ram_remapped_mapping,  /* 640..1024K mapping */
              ram_remapped_mapping2, /* 640..1024K second mapping, for SiS 471 mode */
              ram_high_mapping,      /* 1024K+ mapping */
              ram_2gb_mapping,       /* 1024M+ mapping */
              ram_split_mapping,
              bios_mapping,
              bios_high_mapping;

page_t *pages,        /* RAM page table */
       **page_lookup; /* pagetable lookup */
uint32_t pages_sz;    /* #pages in table */

uint8_t *ram, *ram2; /* the virtual RAM */
uint8_t page_ff[4096];
uint32_t rammask;
uint32_t addr_space_size;

uint8_t *rom; /* the virtual ROM */
uint32_t biosmask, biosaddr;

/* Prefetch (PC) cache: last code page and its host pointer. */
uint32_t pccache;
uint8_t *pccache2;

int readlnext;
int readlookup[256];
uintptr_t *readlookup2;
uintptr_t old_rl2;
uint8_t uncached = 0;
int writelnext;
int writelookup[256];
uintptr_t *writelookup2;
uint32_t mem_logical_addr;

int shadowbios = 0,
    shadowbios_write;
int readlnum = 0,
    writelnum = 0;
int cachesize = 256;

uint32_t get_phys_virt,
         get_phys_phys;

int mem_a20_key = 0,
    mem_a20_alt = 0,
    mem_a20_state = 0;

int mmuflush = 0;
int mmu_perm = 4;

#ifdef USE_NEW_DYNAREC
uint64_t *byte_dirty_mask;
uint64_t *byte_code_present_mask;

uint32_t purgable_page_list_head = 0;
int purgeable_page_count = 0;
#endif

uint8_t high_page = 0; /* if a high (> 4 gb) page was detected */

/* FIXME: re-do this with a 'mem_ops' struct. */
static uint8_t *page_lookupp; /* pagetable mmu_perm lookup */
static uint8_t *readlookupp;
static uint8_t *writelookupp;
static mem_mapping_t *base_mapping, *last_mapping;
static mem_mapping_t *read_mapping[MEM_MAPPINGS_NO];
static mem_mapping_t *write_mapping[MEM_MAPPINGS_NO];
static mem_mapping_t *read_mapping_bus[MEM_MAPPINGS_NO];
static mem_mapping_t *write_mapping_bus[MEM_MAPPINGS_NO];
static uint8_t *_mem_exec[MEM_MAPPINGS_NO];
static uint8_t ff_pccache[4] = { 0xff, 0xff, 0xff, 0xff };
static mem_state_t _mem_state[MEM_MAPPINGS_NO];
static uint32_t remap_start_addr, remap_start_addr2;

#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
static size_t ram_size = 0, ram2_size = 0;
#else
static size_t ram_size = 0;
#endif
#ifdef ENABLE_MEM_LOG
int mem_do_log = ENABLE_MEM_LOG;

/* Forward a formatted message to the emulator log when memory logging
   is compiled in and enabled at run time. */
static void
mem_log(const char *fmt, ...)
{
    va_list ap;

    if (mem_do_log) {
        va_start(ap, fmt);
        pclog_ex(fmt, ap);
        va_end(ap);
    }
}
#else
#    define mem_log(fmt, ...)
#endif
/* Report whether the mapping currently serving 'addr' is one of the
   system RAM mappings (low, mid, mid2, remapped or high RAM). */
int
mem_addr_is_ram(uint32_t addr)
{
    const mem_mapping_t *m = read_mapping[addr >> MEM_GRANULARITY_BITS];

    if (m == &ram_low_mapping)
        return 1;
    if (m == &ram_high_mapping)
        return 1;
    if (m == &ram_mid_mapping)
        return 1;
    if (m == &ram_mid_mapping2)
        return 1;

    return m == &ram_remapped_mapping;
}
void
resetreadlookup(void)
{
int c;
/* Initialize the page lookup table. */
2022-09-18 17:18:07 -04:00
memset(page_lookup, 0x00, (1 << 20) * sizeof(page_t *));
/* Initialize the tables for lower (<= 1024K) RAM. */
for (c = 0; c < 256; c++) {
2022-09-18 17:18:07 -04:00
readlookup[c] = 0xffffffff;
writelookup[c] = 0xffffffff;
}
/* Initialize the tables for high (> 1024K) RAM. */
2022-09-18 17:18:07 -04:00
memset(readlookup2, 0xff, (1 << 20) * sizeof(uintptr_t));
memset(readlookupp, 0x04, (1 << 20) * sizeof(uint8_t));
2022-09-18 17:18:07 -04:00
memset(writelookup2, 0xff, (1 << 20) * sizeof(uintptr_t));
memset(writelookupp, 0x04, (1 << 20) * sizeof(uint8_t));
2022-09-18 17:18:07 -04:00
readlnext = 0;
writelnext = 0;
2022-09-18 17:18:07 -04:00
pccache = 0xffffffff;
high_page = 0;
}
void
flushmmucache(void)
{
int c;
for (c = 0; c < 256; c++) {
2022-09-18 17:18:07 -04:00
if (readlookup[c] != (int) 0xffffffff) {
readlookup2[readlookup[c]] = LOOKUP_INV;
readlookupp[readlookup[c]] = 4;
readlookup[c] = 0xffffffff;
}
if (writelookup[c] != (int) 0xffffffff) {
page_lookup[writelookup[c]] = NULL;
page_lookupp[writelookup[c]] = 4;
writelookup2[writelookup[c]] = LOOKUP_INV;
writelookupp[writelookup[c]] = 4;
writelookup[c] = 0xffffffff;
}
}
mmuflush++;
2022-09-18 17:18:07 -04:00
pccache = (uint32_t) 0xffffffff;
pccache2 = (uint8_t *) 0xffffffff;
#ifdef USE_DYNAREC
codegen_flush();
#endif
}
void
flushmmucache_nopc(void)
{
int c;
for (c = 0; c < 256; c++) {
2022-09-18 17:18:07 -04:00
if (readlookup[c] != (int) 0xffffffff) {
readlookup2[readlookup[c]] = LOOKUP_INV;
readlookupp[readlookup[c]] = 4;
readlookup[c] = 0xffffffff;
}
if (writelookup[c] != (int) 0xffffffff) {
page_lookup[writelookup[c]] = NULL;
page_lookupp[writelookup[c]] = 4;
writelookup2[writelookup[c]] = LOOKUP_INV;
writelookupp[writelookup[c]] = 4;
writelookup[c] = 0xffffffff;
}
}
}
void
mem_flush_write_page(uint32_t addr, uint32_t virt)
{
page_t *page_target = &pages[addr >> 12];
2022-09-18 17:18:07 -04:00
int c;
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
uint32_t a;
#endif
for (c = 0; c < 256; c++) {
2022-09-18 17:18:07 -04:00
if (writelookup[c] != (int) 0xffffffff) {
#if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
2022-09-18 17:18:07 -04:00
uintptr_t target = (uintptr_t) &ram[(uintptr_t) (addr & ~0xfff) - (virt & ~0xfff)];
#else
2022-09-18 17:18:07 -04:00
a = (uintptr_t) (addr & ~0xfff) - (virt & ~0xfff);
uintptr_t target;
2022-09-18 17:18:07 -04:00
if ((addr & ~0xfff) >= (1 << 30))
target = (uintptr_t) &ram2[a - (1 << 30)];
else
target = (uintptr_t) &ram[a];
#endif
2022-09-18 17:18:07 -04:00
if (writelookup2[writelookup[c]] == target || page_lookup[writelookup[c]] == page_target) {
writelookup2[writelookup[c]] = LOOKUP_INV;
page_lookup[writelookup[c]] = NULL;
writelookup[c] = 0xffffffff;
}
}
}
}
2022-09-18 17:18:07 -04:00
#define mmutranslate_read(addr) mmutranslatereal(addr, 0)
#define mmutranslate_write(addr) mmutranslatereal(addr, 1)
#define rammap(x) ((uint32_t *) (_mem_exec[(x) >> MEM_GRANULARITY_BITS]))[((x) >> 2) & MEM_GRANULARITY_QMASK]
#define rammap64(x) ((uint64_t *) (_mem_exec[(x) >> MEM_GRANULARITY_BITS]))[((x) >> 3) & MEM_GRANULARITY_PMASK]
static __inline uint64_t
mmutranslatereal_normal(uint32_t addr, int rw)
{
uint32_t temp, temp2, temp3;
uint32_t addr2;
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
addr2 = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc));
temp = temp2 = rammap(addr2);
if (!(temp & 1)) {
2022-09-18 17:18:07 -04:00
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
return 0xffffffffffffffffULL;
}
if ((temp & 0x80) && (cr4 & CR4_PSE)) {
2022-09-18 17:18:07 -04:00
/*4MB page*/
if (((CPL == 3) && !(temp & 4) && !cpl_override) || (rw && !(temp & 2) && (((CPL == 3) && !cpl_override) || (is486 && (cr0 & WP_FLAG))))) {
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
}
2022-09-18 17:18:07 -04:00
mmu_perm = temp & 4;
rammap(addr2) |= (rw ? 0x60 : 0x20);
2022-09-18 17:18:07 -04:00
return (temp & ~0x3fffff) + (addr & 0x3fffff);
}
2022-09-18 17:18:07 -04:00
temp = rammap((temp & ~0xfff) + ((addr >> 10) & 0xffc));
temp3 = temp & temp2;
if (!(temp & 1) || ((CPL == 3) && !(temp3 & 4) && !cpl_override) || (rw && !(temp3 & 2) && (((CPL == 3) && !cpl_override) || (is486 && (cr0 & WP_FLAG))))) {
2022-09-18 17:18:07 -04:00
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
return 0xffffffffffffffffULL;
}
mmu_perm = temp & 4;
rammap(addr2) |= 0x20;
rammap((temp2 & ~0xfff) + ((addr >> 10) & 0xffc)) |= (rw ? 0x60 : 0x20);
return (uint64_t) ((temp & ~0xfff) + (addr & 0xfff));
}
static __inline uint64_t
mmutranslatereal_pae(uint32_t addr, int rw)
{
uint64_t temp, temp2, temp3, temp4;
uint64_t addr2, addr3, addr4;
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
addr2 = (cr3 & ~0x1f) + ((addr >> 27) & 0x18);
temp = temp2 = rammap64(addr2) & 0x000000ffffffffffULL;
if (!(temp & 1)) {
2022-09-18 17:18:07 -04:00
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
return 0xffffffffffffffffULL;
}
addr3 = (temp & ~0xfffULL) + ((addr >> 18) & 0xff8);
temp = temp4 = rammap64(addr3) & 0x000000ffffffffffULL;
2022-09-18 17:18:07 -04:00
temp3 = temp & temp2;
if (!(temp & 1)) {
2022-09-18 17:18:07 -04:00
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
return 0xffffffffffffffffULL;
}
if (temp & 0x80) {
2022-09-18 17:18:07 -04:00
/*2MB page*/
if (((CPL == 3) && !(temp & 4) && !cpl_override) || (rw && !(temp & 2) && (((CPL == 3) && !cpl_override) || (cr0 & WP_FLAG)))) {
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
}
mmu_perm = temp & 4;
rammap64(addr3) |= (rw ? 0x60 : 0x20);
2022-09-18 17:18:07 -04:00
return ((temp & ~0x1fffffULL) + (addr & 0x1fffffULL)) & 0x000000ffffffffffULL;
}
addr4 = (temp & ~0xfffULL) + ((addr >> 9) & 0xff8);
2022-09-18 17:18:07 -04:00
temp = rammap64(addr4) & 0x000000ffffffffffULL;
temp3 = temp & temp4;
if (!(temp & 1) || ((CPL == 3) && !(temp3 & 4) && !cpl_override) || (rw && !(temp3 & 2) && (((CPL == 3) && !cpl_override) || (cr0 & WP_FLAG)))) {
2022-09-18 17:18:07 -04:00
cr2 = addr;
temp &= 1;
if (CPL == 3)
temp |= 4;
if (rw)
temp |= 2;
cpu_state.abrt = ABRT_PF;
abrt_error = temp;
return 0xffffffffffffffffULL;
}
mmu_perm = temp & 4;
rammap64(addr3) |= 0x20;
rammap64(addr4) |= (rw ? 0x60 : 0x20);
return ((temp & ~0xfffULL) + ((uint64_t) (addr & 0xfff))) & 0x000000ffffffffffULL;
}
uint64_t
mmutranslatereal(uint32_t addr, int rw)
{
/* Fast path to return invalid without any call if an exception has occurred beforehand. */
2022-02-20 02:26:27 -05:00
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
if (cr4 & CR4_PAE)
2022-09-18 17:18:07 -04:00
return mmutranslatereal_pae(addr, rw);
else
2022-09-18 17:18:07 -04:00
return mmutranslatereal_normal(addr, rw);
}
/* This is needed because the old recompiler calls this to check for page fault. */
uint32_t
mmutranslatereal32(uint32_t addr, int rw)
{
/* Fast path to return invalid without any call if an exception has occurred beforehand. */
2022-02-20 02:26:27 -05:00
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return (uint32_t) 0xffffffffffffffffULL;
return (uint32_t) mmutranslatereal(addr, rw);
}
static __inline uint64_t
mmutranslate_noabrt_normal(uint32_t addr, int rw)
{
2022-09-18 17:18:07 -04:00
uint32_t temp, temp2, temp3;
uint32_t addr2;
2022-02-20 02:26:27 -05:00
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
addr2 = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc));
temp = temp2 = rammap(addr2);
2022-09-18 17:18:07 -04:00
if (!(temp & 1))
return 0xffffffffffffffffULL;
if ((temp & 0x80) && (cr4 & CR4_PSE)) {
2022-09-18 17:18:07 -04:00
/*4MB page*/
if (((CPL == 3) && !(temp & 4) && !cpl_override) || (rw && !(temp & 2) && ((CPL == 3) || (cr0 & WP_FLAG))))
return 0xffffffffffffffffULL;
2022-09-18 17:18:07 -04:00
return (temp & ~0x3fffff) + (addr & 0x3fffff);
}
2022-09-18 17:18:07 -04:00
temp = rammap((temp & ~0xfff) + ((addr >> 10) & 0xffc));
temp3 = temp & temp2;
if (!(temp & 1) || ((CPL == 3) && !(temp3 & 4) && !cpl_override) || (rw && !(temp3 & 2) && ((CPL == 3) || (cr0 & WP_FLAG))))
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
return (uint64_t) ((temp & ~0xfff) + (addr & 0xfff));
}
static __inline uint64_t
mmutranslate_noabrt_pae(uint32_t addr, int rw)
{
2022-09-18 17:18:07 -04:00
uint64_t temp, temp2, temp3, temp4;
uint64_t addr2, addr3, addr4;
2022-02-20 02:26:27 -05:00
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
addr2 = (cr3 & ~0x1f) + ((addr >> 27) & 0x18);
temp = temp2 = rammap64(addr2) & 0x000000ffffffffffULL;
2022-09-18 17:18:07 -04:00
if (!(temp & 1))
return 0xffffffffffffffffULL;
addr3 = (temp & ~0xfffULL) + ((addr >> 18) & 0xff8);
temp = temp4 = rammap64(addr3) & 0x000000ffffffffffULL;
2022-09-18 17:18:07 -04:00
temp3 = temp & temp2;
2022-09-18 17:18:07 -04:00
if (!(temp & 1))
return 0xffffffffffffffffULL;
if (temp & 0x80) {
2022-09-18 17:18:07 -04:00
/*2MB page*/
if (((CPL == 3) && !(temp & 4) && !cpl_override) || (rw && !(temp & 2) && ((CPL == 3) || (cr0 & WP_FLAG))))
return 0xffffffffffffffffULL;
2022-09-18 17:18:07 -04:00
return ((temp & ~0x1fffffULL) + (addr & 0x1fffff)) & 0x000000ffffffffffULL;
}
addr4 = (temp & ~0xfffULL) + ((addr >> 9) & 0xff8);
2022-09-18 17:18:07 -04:00
temp = rammap64(addr4) & 0x000000ffffffffffULL;
;
temp3 = temp & temp4;
2022-09-18 17:18:07 -04:00
if (!(temp & 1) || ((CPL == 3) && !(temp3 & 4) && !cpl_override) || (rw && !(temp3 & 2) && ((CPL == 3) || (cr0 & WP_FLAG))))
return 0xffffffffffffffffULL;
return ((temp & ~0xfffULL) + ((uint64_t) (addr & 0xfff))) & 0x000000ffffffffffULL;
}
uint64_t
mmutranslate_noabrt(uint32_t addr, int rw)
{
/* Fast path to return invalid without any call if an exception has occurred beforehand. */
2022-02-20 02:26:27 -05:00
if (cpu_state.abrt)
2022-09-18 17:18:07 -04:00
return 0xffffffffffffffffULL;
if (cr4 & CR4_PAE)
2022-09-18 17:18:07 -04:00
return mmutranslate_noabrt_pae(addr, rw);
else
2022-09-18 17:18:07 -04:00
return mmutranslate_noabrt_normal(addr, rw);
}
/* Invalidate the translation for 'addr'.  NOTE(review): per-address
   granularity is not implemented - this flushes the whole MMU cache via
   the CR3 path, and 'addr' is currently unused. */
void
mmu_invalidate(uint32_t addr)
{
    flushmmucache_cr3();
}
/* Return 1 if 'addr' lies inside the half-open range [start, start+len),
   0 otherwise. */
uint8_t
mem_addr_range_match(uint32_t addr, uint32_t start, uint32_t len)
{
    if (addr < start)
        return 0;
    else if (addr >= (start + len))
        return 0;
    else
        return 1;
}
/* Map 'addr' into a chunk starting at 'chunk_start' of size 'len'
   (len is assumed to be a power of two: the offset is addr mod len). */
uint32_t
mem_addr_translate(uint32_t addr, uint32_t chunk_start, uint32_t len)
{
    return chunk_start + (addr & (len - 1));
}
void
addreadlookup(uint32_t virt, uint32_t phys)
{
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
uint32_t a;
2020-07-16 01:14:24 +02:00
#endif
2022-09-18 17:18:07 -04:00
if (virt == 0xffffffff)
return;
2022-09-18 17:18:07 -04:00
if (readlookup2[virt >> 12] != (uintptr_t) LOOKUP_INV)
return;
if (readlookup[readlnext] != (int) 0xffffffff) {
2022-09-18 17:18:07 -04:00
if ((readlookup[readlnext] == ((es + DI) >> 12)) || (readlookup[readlnext] == ((es + EDI) >> 12)))
uncached = 1;
readlookup2[readlookup[readlnext]] = LOOKUP_INV;
}
#if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
2022-09-18 17:18:07 -04:00
readlookup2[virt >> 12] = (uintptr_t) &ram[(uintptr_t) (phys & ~0xFFF) - (uintptr_t) (virt & ~0xfff)];
2020-07-16 01:14:24 +02:00
#else
2022-09-18 17:18:07 -04:00
a = ((uint32_t) (phys & ~0xfff) - (uint32_t) (virt & ~0xfff));
if ((phys & ~0xfff) >= (1 << 30))
2022-09-18 17:18:07 -04:00
readlookup2[virt >> 12] = (uintptr_t) &ram2[a - (1 << 30)];
else
2022-09-18 17:18:07 -04:00
readlookup2[virt >> 12] = (uintptr_t) &ram[a];
#endif
2022-09-18 17:18:07 -04:00
readlookupp[virt >> 12] = mmu_perm;
readlookup[readlnext++] = virt >> 12;
2022-09-18 17:18:07 -04:00
readlnext &= (cachesize - 1);
cycles -= 9;
}
void
addwritelookup(uint32_t virt, uint32_t phys)
{
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
uint32_t a;
2020-07-16 01:14:24 +02:00
#endif
2022-09-18 17:18:07 -04:00
if (virt == 0xffffffff)
return;
2022-09-18 17:18:07 -04:00
if (page_lookup[virt >> 12])
return;
if (writelookup[writelnext] != -1) {
2022-09-18 17:18:07 -04:00
page_lookup[writelookup[writelnext]] = NULL;
writelookup2[writelookup[writelnext]] = LOOKUP_INV;
}
#ifdef USE_NEW_DYNAREC
2022-09-18 17:18:07 -04:00
# ifdef USE_DYNAREC
if (pages[phys >> 12].block || (phys & ~0xfff) == recomp_page) {
2022-09-18 17:18:07 -04:00
# else
if (pages[phys >> 12].block) {
2022-09-18 17:18:07 -04:00
# endif
#else
2022-09-18 17:18:07 -04:00
# ifdef USE_DYNAREC
if (pages[phys >> 12].block[0] || pages[phys >> 12].block[1] || pages[phys >> 12].block[2] || pages[phys >> 12].block[3] || (phys & ~0xfff) == recomp_page) {
2022-09-18 17:18:07 -04:00
# else
if (pages[phys >> 12].block[0] || pages[phys >> 12].block[1] || pages[phys >> 12].block[2] || pages[phys >> 12].block[3]) {
2022-09-18 17:18:07 -04:00
# endif
#endif
2022-09-18 17:18:07 -04:00
page_lookup[virt >> 12] = &pages[phys >> 12];
page_lookupp[virt >> 12] = mmu_perm;
} else {
#if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
2022-09-18 17:18:07 -04:00
writelookup2[virt >> 12] = (uintptr_t) &ram[(uintptr_t) (phys & ~0xFFF) - (uintptr_t) (virt & ~0xfff)];
2020-07-16 01:14:24 +02:00
#else
2022-09-18 17:18:07 -04:00
a = ((uint32_t) (phys & ~0xfff) - (uint32_t) (virt & ~0xfff));
2022-09-18 17:18:07 -04:00
if ((phys & ~0xfff) >= (1 << 30))
writelookup2[virt >> 12] = (uintptr_t) &ram2[a - (1 << 30)];
else
writelookup2[virt >> 12] = (uintptr_t) &ram[a];
#endif
}
2022-09-18 17:18:07 -04:00
writelookupp[virt >> 12] = mmu_perm;
writelookup[writelnext++] = virt >> 12;
writelnext &= (cachesize - 1);
cycles -= 9;
}
uint8_t *
getpccache(uint32_t a)
{
uint64_t a64 = (uint64_t) a;
uint32_t a2;
a2 = a;
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
a64 = mmutranslate_read(a64);
2022-09-18 17:18:07 -04:00
if (a64 == 0xffffffffffffffffULL)
return ram;
}
a64 &= rammask;
if (_mem_exec[a64 >> MEM_GRANULARITY_BITS]) {
2022-09-18 17:18:07 -04:00
if (is286) {
if (read_mapping[a64 >> MEM_GRANULARITY_BITS] && (read_mapping[a64 >> MEM_GRANULARITY_BITS]->flags & MEM_MAPPING_ROM_WS))
cpu_prefetch_cycles = cpu_rom_prefetch_cycles;
else
cpu_prefetch_cycles = cpu_mem_prefetch_cycles;
}
2022-02-20 02:26:27 -05:00
2022-09-18 17:18:07 -04:00
return &_mem_exec[a64 >> MEM_GRANULARITY_BITS][(uintptr_t) (a64 & MEM_GRANULARITY_PAGE) - (uintptr_t) (a2 & ~0xfff)];
}
mem_log("Bad getpccache %08X%08X\n", (uint32_t) (a64 >> 32), (uint32_t) (a64 & 0xffffffffULL));
2022-09-18 17:18:07 -04:00
return (uint8_t *) &ff_pccache;
}
uint8_t
read_mem_b(uint32_t addr)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
uint8_t ret = 0xff;
int old_cycles = cycles;
mem_logical_addr = addr;
addr &= rammask;
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->read_b)
2022-09-18 17:18:07 -04:00
ret = map->read_b(addr, map->p);
resub_cycles(old_cycles);
return ret;
}
uint16_t
read_mem_w(uint32_t addr)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
uint16_t ret = 0xffff;
int old_cycles = cycles;
mem_logical_addr = addr;
addr &= rammask;
if (addr & 1)
2022-09-18 17:18:07 -04:00
ret = read_mem_b(addr) | (read_mem_b(addr + 1) << 8);
else {
2022-09-18 17:18:07 -04:00
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
2022-09-18 17:18:07 -04:00
if (map && map->read_w)
ret = map->read_w(addr, map->p);
else if (map && map->read_b)
ret = map->read_b(addr, map->p) | (map->read_b(addr + 1, map->p) << 8);
}
resub_cycles(old_cycles);
return ret;
}
void
write_mem_b(uint32_t addr, uint8_t val)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
int old_cycles = cycles;
mem_logical_addr = addr;
addr &= rammask;
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->write_b)
2022-09-18 17:18:07 -04:00
map->write_b(addr, val, map->p);
resub_cycles(old_cycles);
}
void
write_mem_w(uint32_t addr, uint16_t val)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
int old_cycles = cycles;
mem_logical_addr = addr;
addr &= rammask;
if (addr & 1) {
2022-09-18 17:18:07 -04:00
write_mem_b(addr, val);
write_mem_b(addr + 1, val >> 8);
} else {
2022-09-18 17:18:07 -04:00
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
if (map) {
if (map->write_w)
map->write_w(addr, val, map->p);
else if (map->write_b) {
map->write_b(addr, val, map->p);
map->write_b(addr + 1, val >> 8, map->p);
}
}
}
resub_cycles(old_cycles);
}
uint8_t
readmembl(uint32_t addr)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
uint64_t a;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_READ, 1);
2022-09-18 17:18:07 -04:00
addr64 = (uint64_t) addr;
mem_logical_addr = addr;
high_page = 0;
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
a = mmutranslate_read(addr);
addr64 = (uint32_t) a;
2022-09-18 17:18:07 -04:00
if (a > 0xffffffffULL)
return 0xff;
}
addr = (uint32_t) (addr64 & rammask);
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->read_b)
2022-09-18 17:18:07 -04:00
return map->read_b(addr, map->p);
return 0xff;
}
void
writemembl(uint32_t addr, uint8_t val)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
uint64_t a;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_WRITE, 1);
2022-09-18 17:18:07 -04:00
addr64 = (uint64_t) addr;
mem_logical_addr = addr;
high_page = 0;
2022-09-18 17:18:07 -04:00
if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_b) {
page_lookup[addr >> 12]->write_b(addr, val, page_lookup[addr >> 12]);
return;
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
a = mmutranslate_write(addr);
addr64 = (uint32_t) a;
2022-09-18 17:18:07 -04:00
if (a > 0xffffffffULL)
return;
}
addr = (uint32_t) (addr64 & rammask);
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->write_b)
2022-09-18 17:18:07 -04:00
map->write_b(addr, val, map->p);
}
/* Read a byte from memory without MMU translation - result of previous MMU translation passed as value. */
uint8_t
readmembl_no_mmut(uint32_t addr, uint32_t a64)
{
mem_mapping_t *map;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_READ, 1);
mem_logical_addr = addr;
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
if (cpu_state.abrt || high_page)
return 0xff;
2022-09-18 17:18:07 -04:00
addr = a64 & rammask;
} else
2022-09-18 17:18:07 -04:00
addr &= rammask;
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->read_b)
2022-09-18 17:18:07 -04:00
return map->read_b(addr, map->p);
return 0xff;
}
/* Write a byte to memory without MMU translation - result of previous MMU translation passed as value. */
void
writemembl_no_mmut(uint32_t addr, uint32_t a64, uint8_t val)
{
mem_mapping_t *map;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_WRITE, 1);
mem_logical_addr = addr;
if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_b) {
2022-09-18 17:18:07 -04:00
page_lookup[addr >> 12]->write_b(addr, val, page_lookup[addr >> 12]);
return;
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
if (cpu_state.abrt || high_page)
return;
2022-09-18 17:18:07 -04:00
addr = a64 & rammask;
} else
2022-09-18 17:18:07 -04:00
addr &= rammask;
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->write_b)
2022-09-18 17:18:07 -04:00
map->write_b(addr, val, map->p);
}
uint16_t
readmemwl(uint32_t addr)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
int i;
uint64_t a;
addr64a[0] = addr;
addr64a[1] = addr + 1;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 2);
mem_logical_addr = addr;
high_page = 0;
if (addr & 1) {
2022-09-18 17:18:07 -04:00
if (!cpu_cyrix_alignment || (addr & 7) == 7)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffe) {
if (cr0 >> 31) {
for (i = 0; i < 2; i++) {
a = mmutranslate_read(addr + i);
addr64a[i] = (uint32_t) a;
if (a > 0xffffffffULL)
return 0xffff;
}
}
return readmembl_no_mmut(addr, addr64a[0]) | (((uint16_t) readmembl_no_mmut(addr + 1, addr64a[1])) << 8);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
mmu_perm = readlookupp[addr >> 12];
return *(uint16_t *) (readlookup2[addr >> 12] + addr);
}
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
a = mmutranslate_read(addr);
addr64a[0] = (uint32_t) a;
2022-09-18 17:18:07 -04:00
if (a > 0xffffffffULL)
return 0xffff;
} else
2022-09-18 17:18:07 -04:00
addr64a[0] = (uint64_t) addr;
addr = addr64a[0] & rammask;
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->read_w)
2022-09-18 17:18:07 -04:00
return map->read_w(addr, map->p);
if (map && map->read_b) {
2022-09-18 17:18:07 -04:00
return map->read_b(addr, map->p) | ((uint16_t) (map->read_b(addr + 1, map->p)) << 8);
}
return 0xffff;
}
void
writememwl(uint32_t addr, uint16_t val)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
int i;
uint64_t a;
addr64a[0] = addr;
addr64a[1] = addr + 1;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 2);
mem_logical_addr = addr;
high_page = 0;
if (addr & 1) {
2022-09-18 17:18:07 -04:00
if (!cpu_cyrix_alignment || (addr & 7) == 7)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffe) {
if (cr0 >> 31) {
for (i = 0; i < 2; i++) {
/* Do not translate a page that has a valid lookup, as that is by definition valid
and the whole purpose of the lookup is to avoid repeat identical translations. */
if (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b) {
a = mmutranslate_write(addr + i);
addr64a[i] = (uint32_t) a;
if (a > 0xffffffffULL)
return;
}
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
writemembl_no_mmut(addr, addr64a[0], val);
writemembl_no_mmut(addr + 1, addr64a[1], val >> 8);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
mmu_perm = writelookupp[addr >> 12];
*(uint16_t *) (writelookup2[addr >> 12] + addr) = val;
return;
}
}
if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_w) {
2022-09-18 17:18:07 -04:00
page_lookup[addr >> 12]->write_w(addr, val, page_lookup[addr >> 12]);
mmu_perm = page_lookupp[addr >> 12];
return;
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
a = mmutranslate_write(addr);
addr64a[0] = (uint32_t) a;
2022-09-18 17:18:07 -04:00
if (a > 0xffffffffULL)
return;
}
addr = addr64a[0] & rammask;
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->write_w) {
2022-09-18 17:18:07 -04:00
map->write_w(addr, val, map->p);
return;
}
if (map && map->write_b) {
2022-09-18 17:18:07 -04:00
map->write_b(addr, val, map->p);
map->write_b(addr + 1, val >> 8, map->p);
return;
}
}
/* Read a word from memory without MMU translation - results of previous MMU translation passed as array. */
uint16_t
readmemwl_no_mmut(uint32_t addr, uint32_t *a64)
{
mem_mapping_t *map;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_READ, 2);
mem_logical_addr = addr;
if (addr & 1) {
2022-09-18 17:18:07 -04:00
if (!cpu_cyrix_alignment || (addr & 7) == 7)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffe) {
if (cr0 >> 31) {
if (cpu_state.abrt || high_page)
return 0xffff;
}
return readmembl_no_mmut(addr, a64[0]) | (((uint16_t) readmembl_no_mmut(addr + 1, a64[1])) << 8);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
mmu_perm = readlookupp[addr >> 12];
return *(uint16_t *) (readlookup2[addr >> 12] + addr);
}
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
if (cpu_state.abrt || high_page)
return 0xffff;
2022-09-18 17:18:07 -04:00
addr = (uint32_t) (a64[0] & rammask);
} else
2022-09-18 17:18:07 -04:00
addr &= rammask;
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->read_w)
2022-09-18 17:18:07 -04:00
return map->read_w(addr, map->p);
if (map && map->read_b) {
2022-09-18 17:18:07 -04:00
return map->read_b(addr, map->p) | ((uint16_t) (map->read_b(addr + 1, map->p)) << 8);
}
return 0xffff;
}
/* Write a word to memory without MMU translation - results of previous MMU translation passed as array. */
void
writememwl_no_mmut(uint32_t addr, uint32_t *a64, uint16_t val)
{
mem_mapping_t *map;
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_WRITE, 2);
mem_logical_addr = addr;
if (addr & 1) {
2022-09-18 17:18:07 -04:00
if (!cpu_cyrix_alignment || (addr & 7) == 7)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffe) {
if (cr0 >> 31) {
if (cpu_state.abrt || high_page)
return;
}
writemembl_no_mmut(addr, a64[0], val);
writemembl_no_mmut(addr + 1, a64[1], val >> 8);
return;
} else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
mmu_perm = writelookupp[addr >> 12];
*(uint16_t *) (writelookup2[addr >> 12] + addr) = val;
return;
}
}
if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_w) {
2022-09-18 17:18:07 -04:00
mmu_perm = page_lookupp[addr >> 12];
page_lookup[addr >> 12]->write_w(addr, val, page_lookup[addr >> 12]);
return;
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
if (cpu_state.abrt || high_page)
return;
2022-09-18 17:18:07 -04:00
addr = (uint32_t) (a64[0] & rammask);
} else
2022-09-18 17:18:07 -04:00
addr &= rammask;
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->write_w) {
2022-09-18 17:18:07 -04:00
map->write_w(addr, val, map->p);
return;
}
if (map && map->write_b) {
2022-09-18 17:18:07 -04:00
map->write_b(addr, val, map->p);
map->write_b(addr + 1, val >> 8, map->p);
return;
}
}
uint32_t
readmemll(uint32_t addr)
{
mem_mapping_t *map;
2022-09-18 17:18:07 -04:00
int i;
uint64_t a = 0x0000000000000000ULL;
for (i = 0; i < 4; i++)
2022-09-18 17:18:07 -04:00
addr64a[i] = (uint64_t) (addr + i);
2022-03-16 00:33:01 -03:00
GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 4);
mem_logical_addr = addr;
high_page = 0;
if (addr & 3) {
2022-09-18 17:18:07 -04:00
if (!cpu_cyrix_alignment || (addr & 7) > 4)
cycles -= timing_misaligned;
if ((addr & 0xfff) > 0xffc) {
if (cr0 >> 31) {
for (i = 0; i < 4; i++) {
if (i == 0) {
a = mmutranslate_read(addr + i);
addr64a[i] = (uint32_t) a;
} else if (!((addr + i) & 0xfff)) {
a = mmutranslate_read(addr + 3);
addr64a[i] = (uint32_t) a;
if (!cpu_state.abrt) {
a = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
addr64a[i] = (uint32_t) a;
}
} else {
a = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
addr64a[i] = (uint32_t) a;
}
if (a > 0xffffffffULL)
return 0xffff;
}
}
/* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
their result as a parameter to be used if needed. */
return readmemwl_no_mmut(addr, addr64a) | (((uint32_t) readmemwl_no_mmut(addr + 2, &(addr64a[2]))) << 16);
} else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
mmu_perm = readlookupp[addr >> 12];
return *(uint32_t *) (readlookup2[addr >> 12] + addr);
}
}
if (cr0 >> 31) {
2022-09-18 17:18:07 -04:00
a = mmutranslate_read(addr);
addr64a[0] = (uint32_t) a;
2022-09-18 17:18:07 -04:00
if (a > 0xffffffffULL)
return 0xffffffff;
}
addr = addr64a[0] & rammask;
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
if (map && map->read_l)
2022-09-18 17:18:07 -04:00
return map->read_l(addr, map->p);
if (map && map->read_w)
2022-09-18 17:18:07 -04:00
return map->read_w(addr, map->p) | ((uint32_t) (map->read_w(addr + 2, map->p)) << 16);
if (map && map->read_b)
2022-09-18 17:18:07 -04:00
return map->read_b(addr, map->p) | ((uint32_t) (map->read_b(addr + 1, map->p)) << 8) | ((uint32_t) (map->read_b(addr + 2, map->p)) << 16) | ((uint32_t) (map->read_b(addr + 3, map->p)) << 24);
return 0xffffffff;
}
/* Write a 32-bit value to logical address 'addr', handling misalignment,
   page-boundary crossing and MMU translation. Faults abort silently (the
   page-fault exception has already been raised via mmutranslate_write). */
void
writememll(uint32_t addr, uint32_t val)
{
    mem_mapping_t *map;
    int            i;
    uint64_t       a = 0x0000000000000000ULL;

    /* Pre-fill the per-byte translation array with the identity mapping;
       entries are overwritten below when paging is enabled. */
    for (i = 0; i < 4; i++)
        addr64a[i] = (uint64_t) (addr + i);
    GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 4);

    mem_logical_addr = addr;

    high_page = 0;

    if (addr & 3) {
        /* Misaligned access penalty (Cyrix CPUs forgive some cases). */
        if (!cpu_cyrix_alignment || (addr & 7) > 4)
            cycles -= timing_misaligned;
        if ((addr & 0xfff) > 0xffc) {
            /* The write straddles a 4K page boundary: translate byte by byte. */
            if (cr0 >> 31) {
                for (i = 0; i < 4; i++) {
                    /* Do not translate a page that has a valid lookup, as that is by definition valid
                       and the whole purpose of the lookup is to avoid repeat identical translations. */
                    if (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b) {
                        if (i == 0) {
                            a          = mmutranslate_write(addr + i);
                            addr64a[i] = (uint32_t) a;
                        } else if (!((addr + i) & 0xfff)) {
                            /* First byte of the second page: translate via the last
                               byte of the access, then re-apply the page offset. */
                            a          = mmutranslate_write(addr + 3);
                            addr64a[i] = (uint32_t) a;
                            if (!cpu_state.abrt) {
                                a          = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
                                addr64a[i] = (uint32_t) a;
                            }
                        } else {
                            /* Same page as the previous byte: reuse its translation. */
                            a          = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
                            addr64a[i] = (uint32_t) a;
                        }
                        /* Physical address above 4 GB: nothing to write to. */
                        if (a > 0xffffffffULL)
                            return;
                    }
                }
            }
            /* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
               their result as a parameter to be used if needed. */
            writememwl_no_mmut(addr, &(addr64a[0]), val);
            writememwl_no_mmut(addr + 2, &(addr64a[2]), val >> 16);
            return;
        } else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
            /* Fast path: direct host-pointer write through the lookup table. */
            mmu_perm = writelookupp[addr >> 12];
            *(uint32_t *) (writelookup2[addr >> 12] + addr) = val;
            return;
        }
    }

    /* Aligned fast path: per-page write handler (RAM with dirty tracking). */
    if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_l) {
        mmu_perm = page_lookupp[addr >> 12];
        page_lookup[addr >> 12]->write_l(addr, val, page_lookup[addr >> 12]);
        return;
    }

    if (cr0 >> 31) {
        a          = mmutranslate_write(addr);
        addr64a[0] = (uint32_t) a;
        if (a > 0xffffffffULL)
            return;
    }

    addr = addr64a[0] & rammask;

    /* Fall back to the mapping handlers, widest available width first. */
    map = write_mapping[addr >> MEM_GRANULARITY_BITS];
    if (map && map->write_l) {
        map->write_l(addr, val, map->p);
        return;
    }

    if (map && map->write_w) {
        map->write_w(addr, val, map->p);
        map->write_w(addr + 2, val >> 16, map->p);
        return;
    }

    if (map && map->write_b) {
        map->write_b(addr, val, map->p);
        map->write_b(addr + 1, val >> 8, map->p);
        map->write_b(addr + 2, val >> 16, map->p);
        map->write_b(addr + 3, val >> 24, map->p);
        return;
    }
}
/* Read a long from memory without MMU translation - results of previous MMU translation passed as array. */
uint32_t
readmemll_no_mmut(uint32_t addr, uint32_t *a64)
{
    mem_mapping_t *map;

    GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_READ, 4);

    mem_logical_addr = addr;

    if (addr & 3) {
        /* Misaligned access penalty (Cyrix CPUs forgive some cases). */
        if (!cpu_cyrix_alignment || (addr & 7) > 4)
            cycles -= timing_misaligned;
        if ((addr & 0xfff) > 0xffc) {
            /* Page-crossing read: the caller already translated; bail out if
               that translation faulted or hit a >4GB physical page. */
            if (cr0 >> 31) {
                if (cpu_state.abrt || high_page)
                    return 0xffffffff;
            }
            return readmemwl_no_mmut(addr, a64) | ((uint32_t) (readmemwl_no_mmut(addr + 2, &(a64[2]))) << 16);
        } else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
            /* Fast path: direct host-pointer read through the lookup table. */
            mmu_perm = readlookupp[addr >> 12];
            return *(uint32_t *) (readlookup2[addr >> 12] + addr);
        }
    }

    if (cr0 >> 31) {
        if (cpu_state.abrt || high_page)
            return 0xffffffff;
        /* Use the caller-supplied translation instead of walking the page tables again. */
        addr = (uint32_t) (a64[0] & rammask);
    } else
        addr &= rammask;

    /* Fall back to the mapping handlers, widest available width first. */
    map = read_mapping[addr >> MEM_GRANULARITY_BITS];
    if (map && map->read_l)
        return map->read_l(addr, map->p);

    if (map && map->read_w)
        return map->read_w(addr, map->p) | ((uint32_t) (map->read_w(addr + 2, map->p)) << 16);

    if (map && map->read_b)
        return map->read_b(addr, map->p) | ((uint32_t) (map->read_b(addr + 1, map->p)) << 8) | ((uint32_t) (map->read_b(addr + 2, map->p)) << 16) | ((uint32_t) (map->read_b(addr + 3, map->p)) << 24);

    return 0xffffffff;
}
/* Write a long to memory without MMU translation - results of previous MMU translation passed as array. */
void
writememll_no_mmut(uint32_t addr, uint32_t *a64, uint32_t val)
{
    mem_mapping_t *map;

    GDBSTUB_MEM_ACCESS(addr, GDBSTUB_MEM_WRITE, 4);

    mem_logical_addr = addr;

    if (addr & 3) {
        /* Misaligned access penalty (Cyrix CPUs forgive some cases). */
        if (!cpu_cyrix_alignment || (addr & 7) > 4)
            cycles -= timing_misaligned;
        if ((addr & 0xfff) > 0xffc) {
            /* Page-crossing write: the caller already translated; bail out if
               that translation faulted or hit a >4GB physical page. */
            if (cr0 >> 31) {
                if (cpu_state.abrt || high_page)
                    return;
            }
            writememwl_no_mmut(addr, &(a64[0]), val);
            writememwl_no_mmut(addr + 2, &(a64[2]), val >> 16);
            return;
        } else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
            /* Fast path: direct host-pointer write through the lookup table. */
            mmu_perm = writelookupp[addr >> 12];
            *(uint32_t *) (writelookup2[addr >> 12] + addr) = val;
            return;
        }
    }

    /* Aligned fast path: per-page write handler (RAM with dirty tracking). */
    if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_l) {
        mmu_perm = page_lookupp[addr >> 12];
        page_lookup[addr >> 12]->write_l(addr, val, page_lookup[addr >> 12]);
        return;
    }

    if (cr0 >> 31) {
        if (cpu_state.abrt || high_page)
            return;
        /* Use the caller-supplied translation instead of walking the page tables again. */
        addr = (uint32_t) (a64[0] & rammask);
    } else
        addr &= rammask;

    /* Fall back to the mapping handlers, widest available width first. */
    map = write_mapping[addr >> MEM_GRANULARITY_BITS];
    if (map && map->write_l) {
        map->write_l(addr, val, map->p);
        return;
    }

    if (map && map->write_w) {
        map->write_w(addr, val, map->p);
        map->write_w(addr + 2, val >> 16, map->p);
        return;
    }

    if (map && map->write_b) {
        map->write_b(addr, val, map->p);
        map->write_b(addr + 1, val >> 8, map->p);
        map->write_b(addr + 2, val >> 16, map->p);
        map->write_b(addr + 3, val >> 24, map->p);
        return;
    }
}
/* Read a 64-bit value from logical address 'addr', handling misalignment,
   page-boundary crossing and MMU translation. Returns all-ones on a fault
   (the page-fault exception has already been raised). */
uint64_t
readmemql(uint32_t addr)
{
    mem_mapping_t *map;
    int            i;
    uint64_t       a = 0x0000000000000000ULL;

    /* Pre-fill the per-byte translation array with the identity mapping;
       entries are overwritten below when paging is enabled. */
    for (i = 0; i < 8; i++)
        addr64a[i] = (uint64_t) (addr + i);
    GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_READ, 8);

    mem_logical_addr = addr;

    high_page = 0;

    if (addr & 7) {
        cycles -= timing_misaligned;
        if ((addr & 0xfff) > 0xff8) {
            /* The read straddles a 4K page boundary: translate byte by byte. */
            if (cr0 >> 31) {
                for (i = 0; i < 8; i++) {
                    if (i == 0) {
                        a          = mmutranslate_read(addr + i);
                        addr64a[i] = (uint32_t) a;
                    } else if (!((addr + i) & 0xfff)) {
                        /* First byte of the second page: translate via the last
                           byte of the access, then re-apply the page offset. */
                        a          = mmutranslate_read(addr + 7);
                        addr64a[i] = (uint32_t) a;
                        if (!cpu_state.abrt) {
                            a          = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
                            addr64a[i] = (uint32_t) a;
                        }
                    } else {
                        /* Same page as the previous byte: reuse its translation. */
                        a          = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
                        addr64a[i] = (uint32_t) a;
                    }
                    /* Fix: return the full-width all-ones sentinel (was 0xffff,
                       which truncated the fault value of a 64-bit read; the
                       aligned path below returns 0xffffffffffffffffULL). */
                    if (a > 0xffffffffULL)
                        return 0xffffffffffffffffULL;
                }
            }
            /* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
               their result as a parameter to be used if needed. */
            return readmemll_no_mmut(addr, addr64a) | (((uint64_t) readmemll_no_mmut(addr + 4, &(addr64a[4]))) << 32);
        } else if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
            /* Fast path: direct host-pointer read through the lookup table. */
            mmu_perm = readlookupp[addr >> 12];
            return *(uint64_t *) (readlookup2[addr >> 12] + addr);
        }
    }

    if (cr0 >> 31) {
        a          = mmutranslate_read(addr);
        addr64a[0] = (uint32_t) a;
        if (a > 0xffffffffULL)
            return 0xffffffffffffffffULL;
    }

    addr = addr64a[0] & rammask;

    map = read_mapping[addr >> MEM_GRANULARITY_BITS];
    if (map && map->read_l)
        return map->read_l(addr, map->p) | ((uint64_t) map->read_l(addr + 4, map->p) << 32);

    /* No native 32-bit handler: compose from two generic 32-bit reads. */
    return readmemll(addr) | ((uint64_t) readmemll(addr + 4) << 32);
}
/* Write a 64-bit value to logical address 'addr', handling misalignment,
   page-boundary crossing and MMU translation. Faults abort silently (the
   page-fault exception has already been raised via mmutranslate_write). */
void
writememql(uint32_t addr, uint64_t val)
{
    mem_mapping_t *map;
    int            i;
    uint64_t       a = 0x0000000000000000ULL;

    /* Pre-fill the per-byte translation array with the identity mapping;
       entries are overwritten below when paging is enabled. */
    for (i = 0; i < 8; i++)
        addr64a[i] = (uint64_t) (addr + i);
    GDBSTUB_MEM_ACCESS_FAST(addr64a, GDBSTUB_MEM_WRITE, 8);

    mem_logical_addr = addr;

    high_page = 0;

    if (addr & 7) {
        cycles -= timing_misaligned;
        if ((addr & 0xfff) > 0xff8) {
            /* The write straddles a 4K page boundary: translate byte by byte. */
            if (cr0 >> 31) {
                for (i = 0; i < 8; i++) {
                    /* Do not translate a page that has a valid lookup, as that is by definition valid
                       and the whole purpose of the lookup is to avoid repeat identical translations. */
                    if (!page_lookup[(addr + i) >> 12] || !page_lookup[(addr + i) >> 12]->write_b) {
                        if (i == 0) {
                            a          = mmutranslate_write(addr + i);
                            addr64a[i] = (uint32_t) a;
                        } else if (!((addr + i) & 0xfff)) {
                            /* First byte of the second page: translate via the last
                               byte of the access, then re-apply the page offset. */
                            a          = mmutranslate_write(addr + 7);
                            addr64a[i] = (uint32_t) a;
                            if (!cpu_state.abrt) {
                                a          = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
                                addr64a[i] = (uint32_t) a;
                            }
                        } else {
                            /* Same page as the previous byte: reuse its translation. */
                            a          = (a & ~0xfffLL) | ((uint64_t) ((addr + i) & 0xfff));
                            addr64a[i] = (uint32_t) a;
                        }
                        /* Fix: compare the untruncated translation 'a' - addr64a[i]
                           was just stored as (uint32_t) a, so testing it against
                           0xffffffffULL could never fire. Matches writememll(). */
                        if (a > 0xffffffffULL)
                            return;
                    }
                }
            }
            /* No need to waste precious CPU host cycles on mmutranslate's that were already done, just pass
               their result as a parameter to be used if needed. */
            writememll_no_mmut(addr, addr64a, val);
            writememll_no_mmut(addr + 4, &(addr64a[4]), val >> 32);
            return;
        } else if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV) {
            /* Fast path: direct host-pointer write through the lookup table. */
            mmu_perm = writelookupp[addr >> 12];
            *(uint64_t *) (writelookup2[addr >> 12] + addr) = val;
            return;
        }
    }

    /* Aligned fast path: per-page write handler (RAM with dirty tracking). */
    if (page_lookup[addr >> 12] && page_lookup[addr >> 12]->write_l) {
        mmu_perm = page_lookupp[addr >> 12];
        page_lookup[addr >> 12]->write_l(addr, val, page_lookup[addr >> 12]);
        page_lookup[addr >> 12]->write_l(addr + 4, val >> 32, page_lookup[addr >> 12]);
        return;
    }

    if (cr0 >> 31) {
        /* Fix: keep the untruncated result in 'a' and test that, instead of
           testing addr64a[0] after it has been truncated to 32 bits; this is
           how writememll()/readmemql() handle the same check. */
        a          = mmutranslate_write(addr);
        addr64a[0] = (uint32_t) a;
        if (a > 0xffffffffULL)
            return;
    }

    addr = addr64a[0] & rammask;

    /* Fall back to the mapping handlers, widest available width first. */
    map = write_mapping[addr >> MEM_GRANULARITY_BITS];
    if (map && map->write_l) {
        map->write_l(addr, val, map->p);
        map->write_l(addr + 4, val >> 32, map->p);
        return;
    }

    if (map && map->write_w) {
        map->write_w(addr, val, map->p);
        map->write_w(addr + 2, val >> 16, map->p);
        map->write_w(addr + 4, val >> 32, map->p);
        map->write_w(addr + 6, val >> 48, map->p);
        return;
    }

    if (map && map->write_b) {
        map->write_b(addr, val, map->p);
        map->write_b(addr + 1, val >> 8, map->p);
        map->write_b(addr + 2, val >> 16, map->p);
        map->write_b(addr + 3, val >> 24, map->p);
        map->write_b(addr + 4, val >> 32, map->p);
        map->write_b(addr + 5, val >> 40, map->p);
        map->write_b(addr + 6, val >> 48, map->p);
        map->write_b(addr + 7, val >> 56, map->p);
        return;
    }
}
/* Translate 'num' consecutive logical bytes starting at 'addr' into a64[],
   for a later *_no_mmut access. 'write' selects write-access translation.
   Sets high_page when any translated physical address lies above 4 GB. */
void
do_mmutranslate(uint32_t addr, uint32_t *a64, int num, int write)
{
    int      i, cond = 1;
    uint32_t last_addr = addr + (num - 1);
    uint64_t a         = 0x0000000000000000ULL;

    /* NOTE(review): every slot is pre-filled with the base address (not
       addr + i), unlike readmemll()/writememll() - confirm intended. */
    for (i = 0; i < num; i++)
        a64[i] = (uint64_t) addr;

    for (i = 0; i < num; i++) {
        if (cr0 >> 31) {
            /* For writes, re-evaluate the lookup condition only at the start and
               at each page boundary; pages with a valid write lookup are already
               known-good and are skipped. */
            if (write && ((i == 0) || !(addr & 0xfff)))
                cond = (!page_lookup[addr >> 12] || !page_lookup[addr >> 12]->write_b);

            if (cond) {
                /* If we have encountered at least one page fault, mark all subsequent addresses as
                   having page faulted, prevents false negatives in readmem*l_no_mmut. */
                if ((i > 0) && cpu_state.abrt && !high_page)
                    a64[i] = a64[i - 1];
                /* If we are on the same page, there is no need to translate again, as we can just
                   reuse the previous result. */
                else if (i == 0) {
                    a         = mmutranslatereal(addr, write);
                    a64[i]    = (uint32_t) a;
                    high_page = high_page || (!cpu_state.abrt && (a > 0xffffffffULL));
                } else if (!(addr & 0xfff)) {
                    /* Crossed into a new page: translate via the last byte of the
                       access, then re-apply this byte's page offset. */
                    a      = mmutranslatereal(last_addr, write);
                    a64[i] = (uint32_t) a;
                    /* Fix: test the untruncated translation 'a' - a64[i] was just
                       stored as (uint32_t) a, so it can never exceed 0xffffffff
                       and high_page would never be set on this branch (the i == 0
                       branch above already tests 'a'). */
                    high_page = high_page || (!cpu_state.abrt && (a > 0xffffffffULL));
                    if (!cpu_state.abrt) {
                        a      = (a & 0xfffffffffffff000ULL) | ((uint64_t) (addr & 0xfff));
                        a64[i] = (uint32_t) a;
                    }
                } else {
                    /* Same page: reuse the previous page frame with a new offset. */
                    a      = (a & 0xfffffffffffff000ULL) | ((uint64_t) (addr & 0xfff));
                    a64[i] = (uint32_t) a;
                }
            } else
                mmu_perm = page_lookupp[addr >> 12];
        }

        addr++;
    }
}
/* Read one byte from physical (bus) address space; 0xff if unmapped. */
uint8_t
mem_readb_phys(uint32_t addr)
{
    mem_mapping_t *map = read_mapping_bus[addr >> MEM_GRANULARITY_BITS];

    mem_logical_addr = 0xffffffff;

    if (map == NULL)
        return 0xff;

    /* Prefer the directly executable (host RAM) pointer over the handler. */
    if (map->exec != NULL)
        return map->exec[(addr - map->base) & map->mask];
    if (map->read_b != NULL)
        return map->read_b(addr, map->p);

    return 0xff;
}
/* Read one word from physical (bus) address space; splits into byte reads
   when the access has no native word handler or crosses a granularity unit. */
uint16_t
mem_readw_phys(uint32_t addr)
{
    mem_mapping_t *map    = read_mapping_bus[addr >> MEM_GRANULARITY_BITS];
    int            fits   = ((addr & MEM_GRANULARITY_MASK) <= MEM_GRANULARITY_HBOUND);
    uint16_t       hi, lo;

    mem_logical_addr = 0xffffffff;

    if (fits && (map != NULL) && (map->exec != NULL))
        return *(uint16_t *) &(map->exec[(addr - map->base) & map->mask]);

    if (fits && (map != NULL) && (map->read_w != NULL))
        return map->read_w(addr, map->p);

    /* Byte fallback - high byte first, matching the original access order. */
    hi = mem_readb_phys(addr + 1);
    lo = mem_readb_phys(addr);
    return (uint16_t) ((hi << 8) | lo);
}
/* Read one dword from physical (bus) address space; splits into word reads
   when the access has no native dword handler or crosses a granularity unit. */
uint32_t
mem_readl_phys(uint32_t addr)
{
    mem_mapping_t *map  = read_mapping_bus[addr >> MEM_GRANULARITY_BITS];
    int            fits = ((addr & MEM_GRANULARITY_MASK) <= MEM_GRANULARITY_QBOUND);
    uint32_t       hi, lo;

    mem_logical_addr = 0xffffffff;

    if (fits && (map != NULL) && (map->exec != NULL))
        return *(uint32_t *) &(map->exec[(addr - map->base) & map->mask]);

    if (fits && (map != NULL) && (map->read_l != NULL))
        return map->read_l(addr, map->p);

    /* Word fallback - high word first, matching the original access order. */
    hi = mem_readw_phys(addr + 2);
    lo = mem_readw_phys(addr);
    return (hi << 16) | lo;
}
/* Read 'transfer_size' (1, 2 or 4) bytes from physical address 'addr'
   into 'dest'; other sizes are ignored. */
void
mem_read_phys(void *dest, uint32_t addr, int transfer_size)
{
    switch (transfer_size) {
        case 4:
            *(uint32_t *) dest = mem_readl_phys(addr);
            break;
        case 2:
            *(uint16_t *) dest = mem_readw_phys(addr);
            break;
        case 1:
            *(uint8_t *) dest = mem_readb_phys(addr);
            break;
        default:
            break;
    }
}
/* Write one byte to physical (bus) address space; ignored if unmapped. */
void
mem_writeb_phys(uint32_t addr, uint8_t val)
{
    mem_mapping_t *map = write_mapping_bus[addr >> MEM_GRANULARITY_BITS];

    mem_logical_addr = 0xffffffff;

    if (map == NULL)
        return;

    /* Prefer the directly writable (host RAM) pointer over the handler. */
    if (map->exec != NULL)
        map->exec[(addr - map->base) & map->mask] = val;
    else if (map->write_b != NULL)
        map->write_b(addr, val, map->p);
}
/* Write one word to physical (bus) address space; splits into byte writes
   when the access has no native word handler or crosses a granularity unit. */
void
mem_writew_phys(uint32_t addr, uint16_t val)
{
    mem_mapping_t *map  = write_mapping_bus[addr >> MEM_GRANULARITY_BITS];
    int            fits = ((addr & MEM_GRANULARITY_MASK) <= MEM_GRANULARITY_HBOUND);

    mem_logical_addr = 0xffffffff;

    if (fits && (map != NULL) && (map->exec != NULL)) {
        *(uint16_t *) &(map->exec[(addr - map->base) & map->mask]) = val;
        return;
    }

    if (fits && (map != NULL) && (map->write_w != NULL)) {
        map->write_w(addr, val, map->p);
        return;
    }

    /* Byte fallback - low byte first, matching the original access order. */
    mem_writeb_phys(addr, val & 0xff);
    mem_writeb_phys(addr + 1, (val >> 8) & 0xff);
}
/* Write one dword to physical (bus) address space; splits into word writes
   when the access has no native dword handler or crosses a granularity unit. */
void
mem_writel_phys(uint32_t addr, uint32_t val)
{
    mem_mapping_t *map  = write_mapping_bus[addr >> MEM_GRANULARITY_BITS];
    int            fits = ((addr & MEM_GRANULARITY_MASK) <= MEM_GRANULARITY_QBOUND);

    mem_logical_addr = 0xffffffff;

    if (fits && (map != NULL) && (map->exec != NULL)) {
        *(uint32_t *) &(map->exec[(addr - map->base) & map->mask]) = val;
        return;
    }

    if (fits && (map != NULL) && (map->write_l != NULL)) {
        map->write_l(addr, val, map->p);
        return;
    }

    /* Word fallback - low word first, matching the original access order. */
    mem_writew_phys(addr, val & 0xffff);
    mem_writew_phys(addr + 2, (val >> 16) & 0xffff);
}
/* Write 'transfer_size' (1, 2 or 4) bytes from 'src' to physical address
   'addr'; other sizes are ignored. */
void
mem_write_phys(void *src, uint32_t addr, int transfer_size)
{
    switch (transfer_size) {
        case 4:
            mem_writel_phys(addr, *(uint32_t *) src);
            break;
        case 2:
            mem_writew_phys(addr, *(uint16_t *) src);
            break;
        case 1:
            mem_writeb_phys(addr, *(uint8_t *) src);
            break;
        default:
            break;
    }
}
/* Byte read handler for the main RAM mapping. */
uint8_t
mem_read_ram(uint32_t addr, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Read B %02X from %08X\n", ram[addr], addr);
#endif
    /* 286+ machines maintain the fast logical->host lookup tables. */
    if (is286)
        addreadlookup(mem_logical_addr, addr);

    uint8_t ret = ram[addr];
    return ret;
}
/* Word read handler for the main RAM mapping. */
uint16_t
mem_read_ramw(uint32_t addr, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Read W %04X from %08X\n", *(uint16_t *) &ram[addr], addr);
#endif
    /* 286+ machines maintain the fast logical->host lookup tables. */
    if (is286)
        addreadlookup(mem_logical_addr, addr);

    uint16_t *wp = (uint16_t *) &ram[addr];
    return *wp;
}
/* Dword read handler for the main RAM mapping. */
uint32_t
mem_read_raml(uint32_t addr, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Read L %08X from %08X\n", *(uint32_t *) &ram[addr], addr);
#endif
    /* 286+ machines maintain the fast logical->host lookup tables. */
    if (is286)
        addreadlookup(mem_logical_addr, addr);

    uint32_t *lp = (uint32_t *) &ram[addr];
    return *lp;
}
/* Byte read handler for RAM above 1 GB (backed by the ram2 block). */
uint8_t
mem_read_ram_2gb(uint32_t addr, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Read B %02X from %08X\n", ram[addr], addr);
#endif
    addreadlookup(mem_logical_addr, addr);

    /* ram2 starts at the 1 GB physical boundary. */
    uint32_t ofs = addr - (1 << 30);
    return ram2[ofs];
}
/* Word read handler for RAM above 1 GB (backed by the ram2 block). */
uint16_t
mem_read_ram_2gbw(uint32_t addr, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Read W %04X from %08X\n", *(uint16_t *) &ram[addr], addr);
#endif
    addreadlookup(mem_logical_addr, addr);

    /* ram2 starts at the 1 GB physical boundary. */
    uint32_t ofs = addr - (1 << 30);
    return *(uint16_t *) &ram2[ofs];
}
/* Dword read handler for RAM above 1 GB (backed by the ram2 block). */
uint32_t
mem_read_ram_2gbl(uint32_t addr, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Read L %08X from %08X\n", *(uint32_t *) &ram[addr], addr);
#endif
    addreadlookup(mem_logical_addr, addr);

    /* ram2 starts at the 1 GB physical boundary. */
    uint32_t ofs = addr - (1 << 30);
    return *(uint32_t *) &ram2[ofs];
}
#ifdef USE_NEW_DYNAREC
/* Index of page descriptor 'p' within the global pages[] array. */
static inline int
page_index(page_t *p)
{
    /* Pointer subtraction yields the element index directly. */
    return (int) (p - pages);
}
/* Push page 'p' at the head of the purgable-page list (a doubly-linked list
   threaded through pages[] by index). */
void
page_add_to_evict_list(page_t *p)
{
    pages[purgable_page_list_head].evict_prev = page_index(p);
    p->evict_next = purgable_page_list_head;
    p->evict_prev = 0;
    /* The old head's evict_prev was just set to p's index above, so this
       assignment makes p the new list head. */
    purgable_page_list_head = pages[purgable_page_list_head].evict_prev;
    purgeable_page_count++;
}
/* Unlink page 'p' from the purgable-page list; fatal if it is not linked. */
void
page_remove_from_evict_list(page_t *p)
{
    if (!page_in_evict_list(p))
        fatal("page_remove_from_evict_list: not in evict list!\n");

    /* Splice out: fix the predecessor (or the list head if p was first). */
    if (p->evict_prev)
        pages[p->evict_prev].evict_next = p->evict_next;
    else
        purgable_page_list_head = p->evict_next;
    if (p->evict_next)
        pages[p->evict_next].evict_prev = p->evict_prev;
    /* Sentinel marking "not linked". */
    p->evict_prev = EVICT_NOT_IN_LIST;

    purgeable_page_count--;
}
/* Write a byte to a RAM page, updating the dynarec dirty/code-present masks
   and queueing the page for code eviction if translated code is affected. */
void
mem_write_ramb_page(uint32_t addr, uint8_t val, page_t *p)
{
    if (p == NULL)
        return;

    /* Skip all bookkeeping when the byte is unchanged (except while
       recompiling, where self-modification must always be tracked). */
# ifdef USE_DYNAREC
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != p->mem[addr & 0xfff]) || codegen_in_recompile) {
# else
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != p->mem[addr & 0xfff])) {
# endif
        /* NOTE(review): the body stores through p->mem even when this block was
           entered via the p->mem == NULL arm - callers presumably guarantee a
           backing buffer here; verify. */
        uint64_t mask = (uint64_t) 1 << ((addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);
        int byte_offset = (addr >> PAGE_BYTE_MASK_SHIFT) & PAGE_BYTE_MASK_OFFSET_MASK;
        uint64_t byte_mask = (uint64_t) 1 << (addr & PAGE_BYTE_MASK_MASK);

        p->mem[addr & 0xfff] = val;
        p->dirty_mask |= mask;
        /* Translated code exists in the dirtied region: schedule eviction. */
        if ((p->code_present_mask & mask) && !page_in_evict_list(p))
            page_add_to_evict_list(p);
        p->byte_dirty_mask[byte_offset] |= byte_mask;
        if ((p->byte_code_present_mask[byte_offset] & byte_mask) && !page_in_evict_list(p))
            page_add_to_evict_list(p);
    }
}
/* Write a word to a RAM page, updating the dynarec dirty/code-present masks;
   a word ending on a mask-granule boundary dirties two granules. */
void
mem_write_ramw_page(uint32_t addr, uint16_t val, page_t *p)
{
    if (p == NULL)
        return;

    /* Skip all bookkeeping when the word is unchanged (except while
       recompiling, where self-modification must always be tracked). */
# ifdef USE_DYNAREC
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint16_t *) &p->mem[addr & 0xfff]) || codegen_in_recompile) {
# else
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint16_t *) &p->mem[addr & 0xfff])) {
# endif
        uint64_t mask = (uint64_t) 1 << ((addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);
        int byte_offset = (addr >> PAGE_BYTE_MASK_SHIFT) & PAGE_BYTE_MASK_OFFSET_MASK;
        uint64_t byte_mask = (uint64_t) 1 << (addr & PAGE_BYTE_MASK_MASK);

        /* Word straddles a 16-byte granule: dirty the next granule too. */
        if ((addr & 0xf) == 0xf)
            mask |= (mask << 1);
        *(uint16_t *) &p->mem[addr & 0xfff] = val;
        p->dirty_mask |= mask;
        if ((p->code_present_mask & mask) && !page_in_evict_list(p))
            page_add_to_evict_list(p);
        /* Word straddles a 64-bit byte-mask word: spill into the next one. */
        if ((addr & PAGE_BYTE_MASK_MASK) == PAGE_BYTE_MASK_MASK) {
            p->byte_dirty_mask[byte_offset + 1] |= 1;
            if ((p->byte_code_present_mask[byte_offset + 1] & 1) && !page_in_evict_list(p))
                page_add_to_evict_list(p);
        } else
            byte_mask |= (byte_mask << 1);

        p->byte_dirty_mask[byte_offset] |= byte_mask;

        if ((p->byte_code_present_mask[byte_offset] & byte_mask) && !page_in_evict_list(p))
            page_add_to_evict_list(p);
    }
}
/* Write a dword to a RAM page, updating the dynarec dirty/code-present masks;
   the four bytes may straddle both a 16-byte granule and a byte-mask word. */
void
mem_write_raml_page(uint32_t addr, uint32_t val, page_t *p)
{
    if (p == NULL)
        return;

    /* Skip all bookkeeping when the dword is unchanged (except while
       recompiling, where self-modification must always be tracked). */
# ifdef USE_DYNAREC
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint32_t *) &p->mem[addr & 0xfff]) || codegen_in_recompile) {
# else
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint32_t *) &p->mem[addr & 0xfff])) {
# endif
        uint64_t mask = (uint64_t) 1 << ((addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);
        int byte_offset = (addr >> PAGE_BYTE_MASK_SHIFT) & PAGE_BYTE_MASK_OFFSET_MASK;
        /* Four consecutive byte bits at once. */
        uint64_t byte_mask = (uint64_t) 0xf << (addr & PAGE_BYTE_MASK_MASK);

        /* Dword straddles a 16-byte granule: dirty the next granule too. */
        if ((addr & 0xf) >= 0xd)
            mask |= (mask << 1);
        *(uint32_t *) &p->mem[addr & 0xfff] = val;
        p->dirty_mask |= mask;
        p->byte_dirty_mask[byte_offset] |= byte_mask;
        if (!page_in_evict_list(p) && ((p->code_present_mask & mask) || (p->byte_code_present_mask[byte_offset] & byte_mask)))
            page_add_to_evict_list(p);
        /* Spill the remaining byte bits into the next byte-mask word. */
        if ((addr & PAGE_BYTE_MASK_MASK) > (PAGE_BYTE_MASK_MASK - 3)) {
            uint32_t byte_mask_2 = 0xf >> (4 - (addr & 3));

            p->byte_dirty_mask[byte_offset + 1] |= byte_mask_2;
            if ((p->byte_code_present_mask[byte_offset + 1] & byte_mask_2) && !page_in_evict_list(p))
                page_add_to_evict_list(p);
        }
    }
}
#else
/* Write a byte to a RAM page (old dynarec): mark the containing granule
   dirty in the per-quarter dirty masks so stale code gets invalidated. */
void
mem_write_ramb_page(uint32_t addr, uint8_t val, page_t *p)
{
    if (p == NULL)
        return;

    /* Skip bookkeeping when the byte is unchanged (except while recompiling). */
# ifdef USE_DYNAREC
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != p->mem[addr & 0xfff]) || codegen_in_recompile) {
# else
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != p->mem[addr & 0xfff])) {
# endif
        uint64_t mask = (uint64_t) 1 << ((addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);

        p->dirty_mask[(addr >> PAGE_MASK_INDEX_SHIFT) & PAGE_MASK_INDEX_MASK] |= mask;
        p->mem[addr & 0xfff] = val;
    }
}
/* Write a word to a RAM page (old dynarec); a word ending on a granule
   boundary dirties two granules. */
void
mem_write_ramw_page(uint32_t addr, uint16_t val, page_t *p)
{
    if (p == NULL)
        return;

    /* Skip bookkeeping when the word is unchanged (except while recompiling). */
# ifdef USE_DYNAREC
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint16_t *) &p->mem[addr & 0xfff]) || codegen_in_recompile) {
# else
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint16_t *) &p->mem[addr & 0xfff])) {
# endif
        uint64_t mask = (uint64_t) 1 << ((addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);

        /* Word straddles a 16-byte granule: dirty the next granule too. */
        if ((addr & 0xf) == 0xf)
            mask |= (mask << 1);
        p->dirty_mask[(addr >> PAGE_MASK_INDEX_SHIFT) & PAGE_MASK_INDEX_MASK] |= mask;
        *(uint16_t *) &p->mem[addr & 0xfff] = val;
    }
}
/* Write a dword to a RAM page (old dynarec); a dword reaching past a granule
   boundary dirties two granules. */
void
mem_write_raml_page(uint32_t addr, uint32_t val, page_t *p)
{
    if (p == NULL)
        return;

    /* Skip bookkeeping when the dword is unchanged (except while recompiling). */
# ifdef USE_DYNAREC
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint32_t *) &p->mem[addr & 0xfff]) || codegen_in_recompile) {
# else
    if ((p->mem == NULL) || (p->mem == page_ff) || (val != *(uint32_t *) &p->mem[addr & 0xfff])) {
# endif
        uint64_t mask = (uint64_t) 1 << ((addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);

        /* Dword straddles a 16-byte granule: dirty the next granule too. */
        if ((addr & 0xf) >= 0xd)
            mask |= (mask << 1);
        p->dirty_mask[(addr >> PAGE_MASK_INDEX_SHIFT) & PAGE_MASK_INDEX_MASK] |= mask;
        *(uint32_t *) &p->mem[addr & 0xfff] = val;
    }
}
#endif
/* Byte write handler for the main RAM mapping. */
void
mem_write_ram(uint32_t addr, uint8_t val, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Write B %02X to %08X\n", val, addr);
#endif
    /* Pre-286 machines have no lookup tables or dynarec page tracking. */
    if (!is286) {
        ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_ramb_page(addr, val, &pages[addr >> 12]);
}
/* Word write handler for the main RAM mapping. */
void
mem_write_ramw(uint32_t addr, uint16_t val, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Write W %04X to %08X\n", val, addr);
#endif
    /* Pre-286 machines have no lookup tables or dynarec page tracking. */
    if (!is286) {
        *(uint16_t *) &ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_ramw_page(addr, val, &pages[addr >> 12]);
}
/* Dword write handler for the main RAM mapping. */
void
mem_write_raml(uint32_t addr, uint32_t val, void *priv)
{
#ifdef ENABLE_MEM_LOG
    if ((addr >= 0xa0000) && (addr <= 0xbffff))
        mem_log("Write L %08X to %08X\n", val, addr);
#endif
    /* Pre-286 machines have no lookup tables or dynarec page tracking. */
    if (!is286) {
        *(uint32_t *) &ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_raml_page(addr, val, &pages[addr >> 12]);
}
/* Byte read for remapped top-of-640K RAM: redirect into the A0000 window. */
static uint8_t
mem_read_remapped(uint32_t addr, void *priv)
{
    addr = (addr - remap_start_addr) + 0xA0000;
    if (is286)
        addreadlookup(mem_logical_addr, addr);
    return ram[addr];
}
/* Word read for remapped top-of-640K RAM: redirect into the A0000 window. */
static uint16_t
mem_read_remappedw(uint32_t addr, void *priv)
{
    addr = (addr - remap_start_addr) + 0xA0000;
    if (is286)
        addreadlookup(mem_logical_addr, addr);
    return *(uint16_t *) &ram[addr];
}
/* Dword read for remapped top-of-640K RAM: redirect into the A0000 window. */
static uint32_t
mem_read_remappedl(uint32_t addr, void *priv)
{
    addr = (addr - remap_start_addr) + 0xA0000;
    if (is286)
        addreadlookup(mem_logical_addr, addr);
    return *(uint32_t *) &ram[addr];
}
/* Byte read for the second remapped block (SiS 471): D0000 window. */
static uint8_t
mem_read_remapped2(uint32_t addr, void *priv)
{
    addr = (addr - remap_start_addr2) + 0xD0000;
    if (is286)
        addreadlookup(mem_logical_addr, addr);
    return ram[addr];
}
/* Word read for the second remapped block (SiS 471): D0000 window. */
static uint16_t
mem_read_remappedw2(uint32_t addr, void *priv)
{
    addr = (addr - remap_start_addr2) + 0xD0000;
    if (is286)
        addreadlookup(mem_logical_addr, addr);
    return *(uint16_t *) &ram[addr];
}
/* Dword read for the second remapped block (SiS 471): D0000 window. */
static uint32_t
mem_read_remappedl2(uint32_t addr, void *priv)
{
    addr = (addr - remap_start_addr2) + 0xD0000;
    if (is286)
        addreadlookup(mem_logical_addr, addr);
    return *(uint32_t *) &ram[addr];
}
/* Byte write for remapped top-of-640K RAM: redirect into the A0000 window.
   Dirty-mask bookkeeping uses the page of the original (un-remapped) address. */
static void
mem_write_remapped(uint32_t addr, uint8_t val, void *priv)
{
    uint32_t orig_addr = addr;

    addr = (addr - remap_start_addr) + 0xA0000;
    if (!is286) {
        ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_ramb_page(addr, val, &pages[orig_addr >> 12]);
}
/* Word write for remapped top-of-640K RAM: redirect into the A0000 window.
   Dirty-mask bookkeeping uses the page of the original (un-remapped) address. */
static void
mem_write_remappedw(uint32_t addr, uint16_t val, void *priv)
{
    uint32_t orig_addr = addr;

    addr = (addr - remap_start_addr) + 0xA0000;
    if (!is286) {
        *(uint16_t *) &ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_ramw_page(addr, val, &pages[orig_addr >> 12]);
}
/* Dword write for remapped top-of-640K RAM: redirect into the A0000 window.
   Dirty-mask bookkeeping uses the page of the original (un-remapped) address. */
static void
mem_write_remappedl(uint32_t addr, uint32_t val, void *priv)
{
    uint32_t orig_addr = addr;

    addr = (addr - remap_start_addr) + 0xA0000;
    if (!is286) {
        *(uint32_t *) &ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_raml_page(addr, val, &pages[orig_addr >> 12]);
}
/* Byte write for the second remapped block (SiS 471): D0000 window. */
static void
mem_write_remapped2(uint32_t addr, uint8_t val, void *priv)
{
    uint32_t orig_addr = addr;

    addr = (addr - remap_start_addr2) + 0xD0000;
    if (!is286) {
        ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_ramb_page(addr, val, &pages[orig_addr >> 12]);
}
/* Word write for the second remapped block (SiS 471): D0000 window. */
static void
mem_write_remappedw2(uint32_t addr, uint16_t val, void *priv)
{
    uint32_t orig_addr = addr;

    addr = (addr - remap_start_addr2) + 0xD0000;
    if (!is286) {
        *(uint16_t *) &ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_ramw_page(addr, val, &pages[orig_addr >> 12]);
}
/* Dword write for the second remapped block (SiS 471): D0000 window. */
static void
mem_write_remappedl2(uint32_t addr, uint32_t val, void *priv)
{
    uint32_t orig_addr = addr;

    addr = (addr - remap_start_addr2) + 0xD0000;
    if (!is286) {
        *(uint32_t *) &ram[addr] = val;
        return;
    }

    addwritelookup(mem_logical_addr, addr);
    mem_write_raml_page(addr, val, &pages[orig_addr >> 12]);
}
/* Mark every page overlapping [start_addr, end_addr] fully dirty so any
   translated code on those pages is discarded (used e.g. after DMA). */
void
mem_invalidate_range(uint32_t start_addr, uint32_t end_addr)
{
#ifdef USE_NEW_DYNAREC
    page_t *p;

    /* Align the range outward to the dirty-mask granule size. */
    start_addr &= ~PAGE_MASK_MASK;
    end_addr = (end_addr + PAGE_MASK_MASK) & ~PAGE_MASK_MASK;

    for (; start_addr <= end_addr; start_addr += 0x1000) {
        /* Skip addresses beyond the pages array (e.g. PCI device memory). */
        if ((start_addr >> 12) >= pages_sz)
            continue;

        p = &pages[start_addr >> 12];
        if (p) {
            p->dirty_mask = 0xffffffffffffffffULL;

            if (p->byte_dirty_mask)
                memset(p->byte_dirty_mask, 0xff, 64 * sizeof(uint64_t));

            if (!page_in_evict_list(p))
                page_add_to_evict_list(p);
        }
    }
#else
    uint32_t cur_addr;

    /* Align the range outward to the dirty-mask granule size. */
    start_addr &= ~PAGE_MASK_MASK;
    end_addr = (end_addr + PAGE_MASK_MASK) & ~PAGE_MASK_MASK;

    for (; start_addr <= end_addr; start_addr += 0x1000) {
        /* Do nothing if the pages array is empty or DMA reads/writes to/from PCI device memory addresses
           may crash the emulator. */
        cur_addr = (start_addr >> 12);
        if (cur_addr < pages_sz)
            memset(pages[cur_addr].dirty_mask, 0xff, sizeof(pages[cur_addr].dirty_mask));
    }
#endif
}
/* Decide whether a mapping with the given MEM_MAPPING_* 'flags' is visible
   under the current ACCESS_* 'access' state of its address range. Returns
   non-zero if the mapping should be installed in the lookup tables. */
static __inline int
mem_mapping_access_allowed(uint32_t flags, uint16_t access)
{
    int ret = 0;

    if (!(access & ACCESS_DISABLED)) {
        /* Cache and SMRAM states match only their dedicated mapping types. */
        if (access & ACCESS_CACHE)
            ret = (flags & MEM_MAPPING_CACHE);
        else if (access & ACCESS_SMRAM)
            ret = (flags & MEM_MAPPING_SMRAM);
        else if (!(access & ACCESS_INTERNAL)) {
            /* External (bus) access: a ROM mapping must agree with the
               current ROMCS line state. */
            if (flags & MEM_MAPPING_IS_ROM) {
                if (access & ACCESS_ROMCS)
                    ret = (flags & MEM_MAPPING_ROMCS);
                else
                    ret = !(flags & MEM_MAPPING_ROMCS);
            } else
                ret = 1;
            /* Internal-only and SMRAM mappings are never visible externally. */
            ret = ret && !(flags & MEM_MAPPING_INTERNAL) && !(flags & MEM_MAPPING_SMRAM);
        } else
            /* Internal access never hits external-only or SMRAM mappings. */
            ret = !(flags & MEM_MAPPING_EXTERNAL) && !(flags & MEM_MAPPING_SMRAM);
    } else {
        /* Still allow SMRAM if access is DISABLED but also has CACHE and/or SMRAM flags set. */
        if (access & ACCESS_CACHE)
            ret = (flags & MEM_MAPPING_CACHE);
        else if (access & ACCESS_SMRAM)
            ret = (flags & MEM_MAPPING_SMRAM);
    }

    return ret;
}
/* Rebuild the CPU and bus read/write/exec lookup tables for the physical
   range [base, base + size) from the global mapping list. */
void
mem_mapping_recalc(uint64_t base, uint64_t size)
{
    mem_mapping_t *map;
    int            n;
    uint64_t       c;

    if (!size || (base_mapping == NULL))
        return;

    map = base_mapping;

    /* Clear out old mappings. */
    for (c = base; c < base + size; c += MEM_GRANULARITY_SIZE) {
        _mem_exec[c >> MEM_GRANULARITY_BITS]         = NULL;
        write_mapping[c >> MEM_GRANULARITY_BITS]     = NULL;
        read_mapping[c >> MEM_GRANULARITY_BITS]      = NULL;
        write_mapping_bus[c >> MEM_GRANULARITY_BITS] = NULL;
        read_mapping_bus[c >> MEM_GRANULARITY_BITS]  = NULL;
    }

    /* Walk mapping list. */
    while (map != NULL) {
        /* In range? */
        if (map->enable && (uint64_t) map->base < ((uint64_t) base + (uint64_t) size) && ((uint64_t) map->base + (uint64_t) map->size) > (uint64_t) base) {
            /* Clip the mapping to the recalculated range. */
            uint64_t start = (map->base < base) ? map->base : base;
            uint64_t end   = (((uint64_t) map->base + (uint64_t) map->size) < (base + size)) ? ((uint64_t) map->base + (uint64_t) map->size) : (base + size);
            /* NOTE(review): the ternary above picks the smaller of map->base and
               base, then this raises it back to map->base - net effect is
               start = max(map->base, base). */
            if (start < map->base)
                start = map->base;

            for (c = start; c < end; c += MEM_GRANULARITY_SIZE) {
                /* CPU */
                n = !!in_smm;
                if (map->exec && mem_mapping_access_allowed(map->flags, _mem_state[c >> MEM_GRANULARITY_BITS].states[n].x))
                    _mem_exec[c >> MEM_GRANULARITY_BITS] = map->exec + (c - map->base);
                if ((map->write_b || map->write_w || map->write_l) && mem_mapping_access_allowed(map->flags, _mem_state[c >> MEM_GRANULARITY_BITS].states[n].w))
                    write_mapping[c >> MEM_GRANULARITY_BITS] = map;
                if ((map->read_b || map->read_w || map->read_l) && mem_mapping_access_allowed(map->flags, _mem_state[c >> MEM_GRANULARITY_BITS].states[n].r))
                    read_mapping[c >> MEM_GRANULARITY_BITS] = map;

                /* Bus */
                n |= STATE_BUS;
                if ((map->write_b || map->write_w || map->write_l) && mem_mapping_access_allowed(map->flags, _mem_state[c >> MEM_GRANULARITY_BITS].states[n].w))
                    write_mapping_bus[c >> MEM_GRANULARITY_BITS] = map;
                if ((map->read_b || map->read_w || map->read_l) && mem_mapping_access_allowed(map->flags, _mem_state[c >> MEM_GRANULARITY_BITS].states[n].r))
                    read_mapping_bus[c >> MEM_GRANULARITY_BITS] = map;
            }
        }
        map = map->next;
    }

    /* Lookup tables changed: the MMU caches are now stale. */
    flushmmucache_cr3();
}
/* Initialize every field of 'map' and, if it has a non-zero size, install
   it into the lookup tables. A zero-sized mapping starts out disabled. */
void
mem_mapping_set(mem_mapping_t *map,
                uint32_t base,
                uint32_t size,
                uint8_t (*read_b)(uint32_t addr, void *p),
                uint16_t (*read_w)(uint32_t addr, void *p),
                uint32_t (*read_l)(uint32_t addr, void *p),
                void (*write_b)(uint32_t addr, uint8_t val, void *p),
                void (*write_w)(uint32_t addr, uint16_t val, void *p),
                void (*write_l)(uint32_t addr, uint32_t val, void *p),
                uint8_t *exec,
                uint32_t fl,
                void *p)
{
    map->enable = (size != 0x00000000) ? 1 : 0;

    map->base    = base;
    map->size    = size;
    map->mask    = size ? 0xffffffff : 0x00000000;
    map->read_b  = read_b;
    map->read_w  = read_w;
    map->read_l  = read_l;
    map->write_b = write_b;
    map->write_w = write_w;
    map->write_l = write_l;
    map->exec    = exec;
    map->flags   = fl;
    map->p       = p;
    map->next    = NULL;

    mem_log("mem_mapping_add(): Linked list structure: %08X -> %08X -> %08X\n", map->prev, map, map->next);

    /* If the mapping is disabled, there is no need to recalc anything. */
    if (map->enable)
        mem_mapping_recalc(map->base, map->size);
}
/* Append a new mapping to the global linked list and initialize it.
   Aborts via fatal() if the list head/tail invariants are violated. */
void
mem_mapping_add(mem_mapping_t *map,
                uint32_t base,
                uint32_t size,
                uint8_t (*read_b)(uint32_t addr, void *p),
                uint16_t (*read_w)(uint32_t addr, void *p),
                uint32_t (*read_l)(uint32_t addr, void *p),
                void (*write_b)(uint32_t addr, uint8_t val, void *p),
                void (*write_w)(uint32_t addr, uint16_t val, void *p),
                void (*write_l)(uint32_t addr, uint32_t val, void *p),
                uint8_t *exec,
                uint32_t fl,
                void *p)
{
    /* Sanity-check the list invariants before linking anything. */
    if ((base_mapping == NULL) && (last_mapping != NULL)) {
        fatal("mem_mapping_add(): NULL base mapping with non-NULL last mapping\n");
        return;
    } else if ((base_mapping != NULL) && (last_mapping == NULL)) {
        fatal("mem_mapping_add(): Non-NULL base mapping with NULL last mapping\n");
        return;
    } else if ((base_mapping != NULL) && (base_mapping->prev != NULL)) {
        fatal("mem_mapping_add(): Base mapping with a preceding mapping\n");
        return;
    } else if ((last_mapping != NULL) && (last_mapping->next != NULL)) {
        fatal("mem_mapping_add(): Last mapping with a following mapping\n");
        return;
    }

    /* An empty list gets this mapping as its head. */
    if (base_mapping == NULL)
        base_mapping = map;

    /* Link the mapping in as the new tail. */
    map->prev = last_mapping;
    if (last_mapping != NULL)
        last_mapping->next = map;
    last_mapping = map;

    mem_mapping_set(map, base, size, read_b, read_w, read_l,
                    write_b, write_w, write_l, exec, fl, p);
}
/* Recompute the lookup tables over a mapping's current address range. */
void
mem_mapping_do_recalc(mem_mapping_t *map)
{
    const uint32_t start = map->base;
    const uint32_t len   = map->size;

    mem_mapping_recalc(start, len);
}
/* Replace a mapping's read/write handlers and refresh its address range. */
void
mem_mapping_set_handler(mem_mapping_t *map,
                        uint8_t (*read_b)(uint32_t addr, void *p),
                        uint16_t (*read_w)(uint32_t addr, void *p),
                        uint32_t (*read_l)(uint32_t addr, void *p),
                        void (*write_b)(uint32_t addr, uint8_t val, void *p),
                        void (*write_w)(uint32_t addr, uint16_t val, void *p),
                        void (*write_l)(uint32_t addr, uint32_t val, void *p))
{
    /* Install the write handlers... */
    map->write_b = write_b;
    map->write_w = write_w;
    map->write_l = write_l;

    /* ...and the read handlers, then rebuild the lookup tables. */
    map->read_b = read_b;
    map->read_w = read_w;
    map->read_l = read_l;

    mem_mapping_recalc(map->base, map->size);
}
/* Move a mapping to a new base/size, tearing down the old range first. */
void
mem_mapping_set_addr(mem_mapping_t *map, uint32_t base, uint32_t size)
{
    /* Drop the mapping from its old location... */
    map->enable = 0;
    mem_mapping_recalc(map->base, map->size);

    /* ...then bring it back up at the new one. */
    map->enable = 1;
    map->base   = base;
    map->size   = size;
    mem_mapping_recalc(base, size);
}
/* Point the mapping's execution (fetch) backing at a new buffer. */
void
mem_mapping_set_exec(mem_mapping_t *map, uint8_t *exec)
{
    map->exec = exec;

    mem_mapping_recalc(map->base, map->size);
}
/* Change the mapping's address mask and refresh the lookup tables. */
void
mem_mapping_set_mask(mem_mapping_t *map, uint32_t mask)
{
    map->mask = mask;

    mem_mapping_recalc(map->base, map->size);
}
/* Swap the opaque handler context pointer; no recalc is needed since the
   lookup tables only store the mapping itself. */
void
mem_mapping_set_p(mem_mapping_t *map, void *p)
{
    map->p = p;
}
/* Disable a mapping and remove it from the lookup tables. */
void
mem_mapping_disable(mem_mapping_t *map)
{
    map->enable = 0;

    mem_mapping_recalc(map->base, map->size);
}
/* Enable a mapping and insert it into the lookup tables. */
void
mem_mapping_enable(mem_mapping_t *map)
{
    map->enable = 1;

    mem_mapping_recalc(map->base, map->size);
}
void
mem_set_access(uint8_t bitmap, int mode, uint32_t base, uint32_t size, uint16_t access)
{
2022-09-18 17:18:07 -04:00
uint32_t c;
uint16_t mask, smstate = 0x0000;
const uint16_t smstates[4] = { 0x0000, (MEM_READ_SMRAM | MEM_WRITE_SMRAM),
MEM_READ_SMRAM_EX, (MEM_READ_DISABLED_EX | MEM_WRITE_DISABLED_EX) };
int i;
if (mode)
2022-09-18 17:18:07 -04:00
mask = 0x2d6b;
else
2022-09-18 17:18:07 -04:00
mask = 0x1084;
if (mode) {
2022-09-18 17:18:07 -04:00
if (mode == 1)
access = !!access;
if (mode == 3) {
if (access & ACCESS_SMRAM_X)
smstate |= MEM_EXEC_SMRAM;
if (access & ACCESS_SMRAM_R)
smstate |= MEM_READ_SMRAM_2;
if (access & ACCESS_SMRAM_W)
smstate |= MEM_WRITE_SMRAM;
} else
smstate = smstates[access & 0x07];
} else
2022-09-18 17:18:07 -04:00
smstate = access & 0x6f7b;
for (c = 0; c < size; c += MEM_GRANULARITY_SIZE) {
2022-09-18 17:18:07 -04:00
for (i = 0; i < 4; i++) {
if (bitmap & (1 << i)) {
_mem_state[(c + base) >> MEM_GRANULARITY_BITS].vals[i] = (_mem_state[(c + base) >> MEM_GRANULARITY_BITS].vals[i] & mask) | smstate;
}
}
#ifdef ENABLE_MEM_LOG
2022-09-18 17:18:07 -04:00
if (((c + base) >= 0xa0000) && ((c + base) <= 0xbffff)) {
mem_log("Set mem state for block at %08X to %04X with bitmap %02X\n",
c + base, smstate, bitmap);
}
#endif
}
mem_mapping_recalc(base, size);
}
void
mem_a20_init(void)
{
if (is286) {
2022-09-18 17:18:07 -04:00
rammask = cpu_16bitbus ? 0xefffff : 0xffefffff;
if (is6117)
rammask |= 0x03000000;
flushmmucache();
mem_a20_state = mem_a20_key | mem_a20_alt;
} else {
2022-09-18 17:18:07 -04:00
rammask = 0xfffff;
flushmmucache();
mem_a20_key = mem_a20_alt = mem_a20_state = 0;
}
}
/* Close all the memory mappings: unlink every node and empty the list. */
void
mem_close(void)
{
    mem_mapping_t *map = base_mapping;

    while (map != NULL) {
        mem_mapping_t *next = map->next;

        map->prev = map->next = NULL;
        map = next;
    }

    base_mapping = last_mapping = NULL;
}
/* Register a mapping backed directly by the RAM array at the same offset,
   using the standard RAM read/write handlers. */
static void
mem_add_ram_mapping(mem_mapping_t *mapping, uint32_t base, uint32_t size)
{
    mem_mapping_add(mapping, base, size,
                    mem_read_ram, mem_read_ramw, mem_read_raml,
                    mem_write_ram, mem_write_ramw, mem_write_raml,
                    &ram[base], MEM_MAPPING_INTERNAL, NULL);
}
/* Flag a range as internal RAM, then register the RAM mapping over it. */
static void
mem_init_ram_mapping(mem_mapping_t *mapping, uint32_t base, uint32_t size)
{
    mem_set_mem_state_both(base, size, MEM_READ_INTERNAL | MEM_WRITE_INTERNAL);

    mem_add_ram_mapping(mapping, base, size);
}
/* Reset the memory state. */
void
mem_reset(void)
{
uint32_t c, m;
memset(page_ff, 0xff, sizeof(page_ff));
2021-06-07 00:17:29 +02:00
#ifdef USE_NEW_DYNAREC
if (byte_dirty_mask) {
2022-09-18 17:18:07 -04:00
free(byte_dirty_mask);
byte_dirty_mask = NULL;
2021-06-07 00:17:29 +02:00
}
if (byte_code_present_mask) {
2022-09-18 17:18:07 -04:00
free(byte_code_present_mask);
byte_code_present_mask = NULL;
2021-06-07 00:17:29 +02:00
}
#endif
/* Free the old pages array, if necessary. */
if (pages) {
2022-09-18 17:18:07 -04:00
free(pages);
pages = NULL;
2021-06-07 00:17:29 +02:00
}
2020-07-16 01:14:24 +02:00
if (ram != NULL) {
2022-09-18 17:18:07 -04:00
plat_munmap(ram, ram_size);
ram = NULL;
ram_size = 0;
}
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
if (ram2 != NULL) {
2022-09-18 17:18:07 -04:00
plat_munmap(ram2, ram2_size);
ram2 = NULL;
ram2_size = 0;
}
2021-06-07 00:17:29 +02:00
if (mem_size > 2097152)
2022-09-18 17:18:07 -04:00
mem_size = 2097152;
#endif
2021-06-07 00:17:29 +02:00
m = 1024UL * mem_size;
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
if (mem_size > 1048576) {
2022-09-18 17:18:07 -04:00
ram_size = 1 << 30;
ram = (uint8_t *) plat_mmap(ram_size, 0); /* allocate and clear the RAM block of the first 1 GB */
if (ram == NULL) {
fatal("Failed to allocate primary RAM block. Make sure you have enough RAM available.\n");
return;
}
memset(ram, 0x00, ram_size);
ram2_size = m - (1 << 30);
ram2 = (uint8_t *) plat_mmap(ram2_size, 0); /* allocate and clear the RAM block above 1 GB */
if (ram2 == NULL) {
if (config_changed == 2)
fatal(EMU_NAME " must be restarted for the memory amount change to be applied.\n");
else
fatal("Failed to allocate secondary RAM block. Make sure you have enough RAM available.\n");
return;
}
memset(ram2, 0x00, ram2_size);
} else
#endif
{
2022-09-18 17:18:07 -04:00
ram_size = m;
ram = (uint8_t *) plat_mmap(ram_size, 0); /* allocate and clear the RAM block */
if (ram == NULL) {
fatal("Failed to allocate RAM block. Make sure you have enough RAM available.\n");
return;
}
memset(ram, 0x00, ram_size);
if (mem_size > 1048576)
ram2 = &(ram[1 << 30]);
2020-07-19 05:54:09 +02:00
}
/*
* Allocate the page table based on how much RAM we have.
* We re-allocate the table on each (hard) reset, as the
* memory amount could have changed.
*/
if (is286) {
2022-09-18 17:18:07 -04:00
if (cpu_16bitbus) {
/* 80286/386SX; maximum address space is 16MB. */
m = 4096;
/* ALi M6117; maximum address space is 64MB. */
if (is6117)
m <<= 2;
} else {
/* 80386DX+; maximum address space is 4GB. */
m = 1048576;
}
} else {
2022-09-18 17:18:07 -04:00
/* 8088/86; maximum address space is 1MB. */
m = 256;
}
addr_space_size = m;
/*
* Allocate and initialize the (new) page table.
*/
2021-06-07 00:17:29 +02:00
pages_sz = m;
2022-09-18 17:18:07 -04:00
pages = (page_t *) malloc(m * sizeof(page_t));
memset(page_lookup, 0x00, (1 << 20) * sizeof(page_t *));
memset(page_lookupp, 0x04, (1 << 20) * sizeof(uint8_t));
2022-09-18 17:18:07 -04:00
memset(pages, 0x00, pages_sz * sizeof(page_t));
#ifdef USE_NEW_DYNAREC
byte_dirty_mask = malloc((mem_size * 1024) / 8);
memset(byte_dirty_mask, 0, (mem_size * 1024) / 8);
byte_code_present_mask = malloc((mem_size * 1024) / 8);
memset(byte_code_present_mask, 0, (mem_size * 1024) / 8);
#endif
for (c = 0; c < pages_sz; c++) {
2022-09-18 17:18:07 -04:00
if ((c << 12) >= (mem_size << 10))
pages[c].mem = page_ff;
else {
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
2022-09-18 17:18:07 -04:00
if (mem_size > 1048576) {
if ((c << 12) < (1 << 30))
pages[c].mem = &ram[c << 12];
else
pages[c].mem = &ram2[(c << 12) - (1 << 30)];
} else
pages[c].mem = &ram[c << 12];
#else
2022-09-18 17:18:07 -04:00
pages[c].mem = &ram[c << 12];
#endif
2022-09-18 17:18:07 -04:00
}
if (c < m) {
pages[c].write_b = mem_write_ramb_page;
pages[c].write_w = mem_write_ramw_page;
pages[c].write_l = mem_write_raml_page;
}
#ifdef USE_NEW_DYNAREC
2022-09-18 17:18:07 -04:00
pages[c].evict_prev = EVICT_NOT_IN_LIST;
pages[c].byte_dirty_mask = &byte_dirty_mask[c * 64];
pages[c].byte_code_present_mask = &byte_code_present_mask[c * 64];
#endif
}
2022-09-18 17:18:07 -04:00
memset(_mem_exec, 0x00, sizeof(_mem_exec));
memset(write_mapping, 0x00, sizeof(write_mapping));
memset(read_mapping, 0x00, sizeof(read_mapping));
memset(write_mapping_bus, 0x00, sizeof(write_mapping_bus));
2022-09-18 17:18:07 -04:00
memset(read_mapping_bus, 0x00, sizeof(read_mapping_bus));
base_mapping = last_mapping = NULL;
/* Set the entire memory space as external. */
memset(_mem_state, 0x00, sizeof(_mem_state));
/* Set the low RAM space as internal. */
mem_init_ram_mapping(&ram_low_mapping, 0x000000, (mem_size > 640) ? 0xa0000 : mem_size * 1024);
if (mem_size > 1024) {
2022-09-18 17:18:07 -04:00
if (cpu_16bitbus && !is6117 && mem_size > 16256)
mem_init_ram_mapping(&ram_high_mapping, 0x100000, (16256 - 1024) * 1024);
else if (cpu_16bitbus && is6117 && mem_size > 65408)
mem_init_ram_mapping(&ram_high_mapping, 0x100000, (65408 - 1024) * 1024);
else {
if (mem_size > 1048576) {
mem_init_ram_mapping(&ram_high_mapping, 0x100000, (1048576 - 1024) * 1024);
mem_set_mem_state_both((1 << 30), (mem_size - 1048576) * 1024,
MEM_READ_INTERNAL | MEM_WRITE_INTERNAL);
mem_mapping_add(&ram_2gb_mapping, (1 << 30),
((mem_size - 1048576) * 1024),
mem_read_ram_2gb, mem_read_ram_2gbw, mem_read_ram_2gbl,
mem_write_ram, mem_write_ramw, mem_write_raml,
ram2, MEM_MAPPING_INTERNAL, NULL);
} else
mem_init_ram_mapping(&ram_high_mapping, 0x100000, (mem_size - 1024) * 1024);
}
}
if (mem_size > 768) {
2022-09-18 17:18:07 -04:00
mem_add_ram_mapping(&ram_mid_mapping, 0xa0000, 0x60000);
mem_add_ram_mapping(&ram_mid_mapping2, 0xa0000, 0x60000);
mem_mapping_disable(&ram_mid_mapping2);
}
mem_mapping_add(&ram_remapped_mapping, mem_size * 1024, 256 * 1024,
2022-09-18 17:18:07 -04:00
mem_read_remapped, mem_read_remappedw, mem_read_remappedl,
mem_write_remapped, mem_write_remappedw, mem_write_remappedl,
ram + 0xa0000, MEM_MAPPING_INTERNAL, NULL);
mem_mapping_disable(&ram_remapped_mapping);
/* Mapping for SiS 471 relocation which relocates A0000-BFFFF, D0000-EFFFF, which is non-contiguous. */
mem_mapping_add(&ram_remapped_mapping2, mem_size * 1024, 256 * 1024,
mem_read_remapped2, mem_read_remappedw2, mem_read_remappedl2,
mem_write_remapped2, mem_write_remappedw2, mem_write_remappedl2,
ram + 0xd0000, MEM_MAPPING_INTERNAL, NULL);
mem_mapping_disable(&ram_remapped_mapping2);
mem_a20_init();
#ifdef USE_NEW_DYNAREC
purgable_page_list_head = 0;
2022-09-18 17:18:07 -04:00
purgeable_page_count = 0;
#endif
}
void
mem_init(void)
{
/* Perform a one-time init. */
ram = rom = NULL;
2022-09-18 17:18:07 -04:00
ram2 = NULL;
pages = NULL;
/* Allocate the lookup tables. */
2022-09-18 17:18:07 -04:00
page_lookup = (page_t **) malloc((1 << 20) * sizeof(page_t *));
page_lookupp = (uint8_t *) malloc((1 << 20) * sizeof(uint8_t));
readlookup2 = malloc((1 << 20) * sizeof(uintptr_t));
readlookupp = malloc((1 << 20) * sizeof(uint8_t));
writelookup2 = malloc((1 << 20) * sizeof(uintptr_t));
writelookupp = malloc((1 << 20) * sizeof(uint8_t));
}
void
mem_remap_top(int kb)
{
2022-09-18 17:18:07 -04:00
uint32_t c;
uint32_t start = (mem_size >= 1024) ? mem_size : 1024;
int offset, size = mem_size - 640;
int set = 1;
static int old_kb = 0;
int sis_mode = 0;
uint32_t start_addr = 0, addr = 0;
mem_log("MEM: remapping top %iKB (mem=%i)\n", kb, mem_size);
2022-09-18 17:18:07 -04:00
if (mem_size <= 640)
return;
/* SiS 471 special mode. */
if (kb == -256) {
kb = 256;
sis_mode = 1;
}
if (kb == 0) {
2022-09-18 17:18:07 -04:00
kb = old_kb;
set = 0;
} else
2022-09-18 17:18:07 -04:00
old_kb = kb;
2022-02-20 02:26:27 -05:00
if (size > kb)
2022-09-18 17:18:07 -04:00
size = kb;
remap_start_addr = start << 10;
remap_start_addr2 = (start << 10) + 0x00020000;
for (c = ((start * 1024) >> 12); c < (((start + size) * 1024) >> 12); c++) {
2022-09-18 17:18:07 -04:00
offset = c - ((start * 1024) >> 12);
/* Use A0000-BFFFF, D0000-EFFFF instead of C0000-DFFFF, E0000-FFFFF. */
addr = 0xa0000 + (offset << 12);
if (sis_mode) {
/* A0000-DFFFF -> A0000-BFFFF, D0000-EFFFF */
if (addr >= 0x000c0000)
addr += 0x00010000;
}
if (start_addr != 0)
start_addr = addr;
pages[c].mem = set ? &ram[addr] : page_ff;
2022-09-18 17:18:07 -04:00
pages[c].write_b = set ? mem_write_ramb_page : NULL;
pages[c].write_w = set ? mem_write_ramw_page : NULL;
pages[c].write_l = set ? mem_write_raml_page : NULL;
#ifdef USE_NEW_DYNAREC
2022-09-18 17:18:07 -04:00
pages[c].evict_prev = EVICT_NOT_IN_LIST;
pages[c].byte_dirty_mask = &byte_dirty_mask[(addr >> 12) * 64];
pages[c].byte_code_present_mask = &byte_code_present_mask[(addr >> 12) * 64];
#endif
}
2022-09-18 17:18:07 -04:00
mem_set_mem_state_both(start * 1024, size * 1024, set ? (MEM_READ_INTERNAL | MEM_WRITE_INTERNAL) : (MEM_READ_EXTERNAL | MEM_WRITE_EXTERNAL));
for (c = 0xa0; c < 0xf0; c++) {
if ((c >= 0xc0) && (c <= 0xcf))
continue;
if (sis_mode || ((c << 12) >= (mem_size << 10)))
pages[c].mem = page_ff;
else {
#if (!(defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64))
if (mem_size > 1048576) {
if ((c << 12) < (1 << 30))
pages[c].mem = &ram[c << 12];
else
pages[c].mem = &ram2[(c << 12) - (1 << 30)];
} else
pages[c].mem = &ram[c << 12];
#else
pages[c].mem = &ram[c << 12];
#endif
}
if (!sis_mode && (c < addr_space_size)) {
pages[c].write_b = mem_write_ramb_page;
pages[c].write_w = mem_write_ramw_page;
pages[c].write_l = mem_write_raml_page;
} else {
pages[c].write_b = NULL;
pages[c].write_w = NULL;
pages[c].write_l = NULL;
}
#ifdef USE_NEW_DYNAREC
pages[c].evict_prev = EVICT_NOT_IN_LIST;
pages[c].byte_dirty_mask = &byte_dirty_mask[c * 64];
pages[c].byte_code_present_mask = &byte_code_present_mask[c * 64];
#endif
}
if (set) {
if (sis_mode) {
mem_mapping_set_addr(&ram_remapped_mapping, start * 1024, 0x00020000);
mem_mapping_set_exec(&ram_remapped_mapping, ram + 0x000a0000);
mem_mapping_set_addr(&ram_remapped_mapping2, (start * 1024) + 0x00020000, 0x00020000);
mem_mapping_set_exec(&ram_remapped_mapping2, ram + 0x000d0000);
mem_mapping_set_addr(&ram_mid_mapping, 0x000c0000, 0x00010000);
mem_mapping_set_exec(&ram_mid_mapping, ram + 0x000c0000);
mem_mapping_set_addr(&ram_mid_mapping2, 0x000f0000, 0x00010000);
mem_mapping_set_exec(&ram_mid_mapping2, ram + 0x000f0000);
} else {
mem_mapping_set_addr(&ram_remapped_mapping, start * 1024, size * 1024);
mem_mapping_set_exec(&ram_remapped_mapping, ram + start_addr);
mem_mapping_disable(&ram_remapped_mapping2);
mem_mapping_set_addr(&ram_mid_mapping, 0x000a0000, 0x00060000);
mem_mapping_set_exec(&ram_mid_mapping, ram + 0x000a0000);
mem_mapping_disable(&ram_mid_mapping2);
}
} else {
2022-09-18 17:18:07 -04:00
mem_mapping_disable(&ram_remapped_mapping);
mem_mapping_disable(&ram_remapped_mapping2);
mem_mapping_set_addr(&ram_mid_mapping, 0x000a0000, 0x00060000);
mem_mapping_set_exec(&ram_mid_mapping, ram + 0x000a0000);
mem_mapping_disable(&ram_mid_mapping2);
}
flushmmucache();
}
void
mem_reset_page_blocks(void)
{
uint32_t c;
2022-09-18 17:18:07 -04:00
if (pages == NULL)
return;
for (c = 0; c < pages_sz; c++) {
2022-09-18 17:18:07 -04:00
pages[c].write_b = mem_write_ramb_page;
pages[c].write_w = mem_write_ramw_page;
pages[c].write_l = mem_write_raml_page;
#ifdef USE_NEW_DYNAREC
2022-09-18 17:18:07 -04:00
pages[c].block = BLOCK_INVALID;
pages[c].block_2 = BLOCK_INVALID;
pages[c].head = BLOCK_INVALID;
#else
2022-09-18 17:18:07 -04:00
pages[c].block[0] = pages[c].block[1] = pages[c].block[2] = pages[c].block[3] = NULL;
pages[c].block_2[0] = pages[c].block_2[1] = pages[c].block_2[2] = pages[c].block_2[3] = NULL;
pages[c].head = NULL;
#endif
}
}
void
mem_a20_recalc(void)
{
int state;
2022-09-18 17:18:07 -04:00
if (!is286) {
rammask = 0xfffff;
flushmmucache();
mem_a20_key = mem_a20_alt = mem_a20_state = 0;
2022-09-18 17:18:07 -04:00
return;
}
state = mem_a20_key | mem_a20_alt;
if (state && !mem_a20_state) {
2022-09-18 17:18:07 -04:00
rammask = (cpu_16bitbus) ? 0xffffff : 0xffffffff;
if (is6117)
rammask |= 0x03000000;
flushmmucache();
} else if (!state && mem_a20_state) {
2022-09-18 17:18:07 -04:00
rammask = (cpu_16bitbus) ? 0xefffff : 0xffefffff;
if (is6117)
rammask |= 0x03000000;
flushmmucache();
}
mem_a20_state = state;
}