Fixed MSRs on AMD CPUs - the VIA machine now works without issues with the high-speed AMD CPUs.

This commit is contained in:
OBattler
2020-01-16 20:49:58 +01:00
parent 8f17d4ed8d
commit ebf0d1ef3a
7 changed files with 530 additions and 44 deletions
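
Note: the MSR read/write paths in the hunks below all follow the RDMSR/WRMSR convention of splitting a 64-bit MSR value across the EDX:EAX register pair. A minimal sketch of that packing, with helper names that are illustrative and not taken from the commit:

    #include <stdint.h>

    /* Split a 64-bit MSR value into EDX:EAX, as the cpu_RDMSR() cases below do. */
    static void msr_to_edx_eax(uint64_t val, uint32_t *eax, uint32_t *edx)
    {
        *eax = val & 0xffffffff;
        *edx = val >> 32;
    }

    /* Recombine EDX:EAX into a 64-bit MSR value, as the cpu_WRMSR() cases below do. */
    static uint64_t edx_eax_to_msr(uint32_t eax, uint32_t edx)
    {
        return eax | ((uint64_t) edx << 32);
    }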


@@ -191,8 +191,9 @@ uint64_t ecx570_msr = 0;
#if defined(DEV_BRANCH) && defined(USE_AMD_K)
uint64_t ecx83_msr = 0; /* AMD K5 and K6 MSR's. */
uint64_t star = 0; /* These are K6-only. */
uint64_t sfmask = 0;
uint64_t star = 0; /* AMD K6-2+. */
uint64_t amd_efer = 0, amd_whcr = 0; /* AMD K6-2+ registers. */
#endif
int timing_rr;
@@ -1193,9 +1194,9 @@ cpu_set(void)
case CPU_K5:
case CPU_5K86:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_k6_0f, dynarec_ops_386, dynarec_ops_k6_0f);
x86_setopcodes(ops_386, ops_pentiummmx_0f, dynarec_ops_386, dynarec_ops_pentiummmx_0f);
#else
x86_setopcodes(ops_386, ops_k6_0f);
x86_setopcodes(ops_386, ops_pentiummmx_0f);
#endif
timing_rr = 1; /*register dest - register src*/
timing_rm = 2; /*register dest - memory src*/
@@ -1214,9 +1215,9 @@ cpu_set(void)
case CPU_K6:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_k6_0f, dynarec_ops_386, dynarec_ops_k6_0f);
x86_setopcodes(ops_386, ops_pentiummmx_0f, dynarec_ops_386, dynarec_ops_pentiummmx_0f);
#else
x86_setopcodes(ops_386, ops_k6_0f);
x86_setopcodes(ops_386, ops_pentiummmx_0f);
#endif
timing_rr = 1; /*register dest - register src*/
timing_rm = 2; /*register dest - memory src*/
@@ -1801,6 +1802,24 @@ cpu_CPUID(void)
}
}
void cpu_ven_reset(void)
{
#if defined(DEV_BRANCH) && defined(USE_AMD_K)
switch (machines[machine].cpu[cpu_manufacturer].cpus[cpu_effective].cpu_type)
{
case CPU_K5:
case CPU_5K86:
case CPU_K6:
amd_efer = amd_whcr = 0ULL;
break;
case CPU_K6_2:
amd_efer = amd_whcr = 0ULL;
star = 0ULL;
break;
}
#endif
}
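
The new cpu_ven_reset() clears the vendor-specific MSR state whenever the CPU is reset. A rough, hypothetical sanity check of that path is sketched below; it assumes the ECX/EAX/EDX register macros and fatal() from the emulator's headers are in scope and is not code from this commit:

    /* Hypothetical check, not part of the commit: after a vendor reset,
       the AMD EFER MSR (index 0xC0000080) must read back as zero.       */
    static void check_amd_msr_reset(void)
    {
        cpu_ven_reset();      /* Zeroes amd_efer and amd_whcr (and star on the K6-2). */
        ECX = 0xC0000080;     /* EFER */
        cpu_RDMSR();
        if ((EAX != 0) || (EDX != 0))
            fatal("EFER did not reset to 0\n");
    }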
void cpu_RDMSR()
{
switch (machines[machine].cpu[cpu_manufacturer].cpus[cpu_effective].cpu_type)
@@ -1842,24 +1861,57 @@ void cpu_RDMSR()
EAX = EDX = 0;
switch (ECX)
{
case 0x0e:
case 0x0000000e:
EAX = msr.tr12;
break;
case 0x10:
case 0x00000010:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
case 0x83:
case 0x00000083:
EAX = ecx83_msr & 0xffffffff;
EDX = ecx83_msr >> 32;
break;
case 0xC0000080:
EAX = amd_efer & 0xffffffff;
EDX = amd_efer >> 32;
break;
case 0xC0000082:
EAX = amd_whcr & 0xffffffff;
EDX = amd_whcr >> 32;
break;
default:
x86gpf(NULL, 0);
break;
}
break;
case CPU_K6_2:
EAX = EDX = 0;
switch (ECX)
{
case 0x0000000e:
EAX = msr.tr12;
break;
case 0x00000010:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
case 0x00000083:
EAX = ecx83_msr & 0xffffffff;
EDX = ecx83_msr >> 32;
break;
case 0xC0000080:
EAX = amd_efer & 0xffffffff;
EDX = amd_efer >> 32;
break;
case 0xC0000081:
EAX = star & 0xffffffff;
EDX = star >> 32;
break;
case 0xC0000084:
EAX = sfmask & 0xffffffff;
EDX = sfmask >> 32;
break;
case 0xC0000082:
EAX = amd_whcr & 0xffffffff;
EDX = amd_whcr >> 32;
break;
default:
x86gpf(NULL, 0);
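
From the guest's side, each of these MSRs is reached by loading the index into ECX and executing rdmsr, which returns the value in EDX:EAX; that is why every case above fills exactly those two registers. A sketch of the guest-visible instruction, using GCC-style inline assembly (not code from this repository):

    #include <stdint.h>

    /* Guest-side read of an MSR by index, e.g. 0xC0000080 for the AMD EFER.
       Kernel-mode only on real hardware; shown here purely for illustration. */
    static uint64_t guest_rdmsr(uint32_t index)
    {
        uint32_t lo, hi;
        __asm__ volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (index));
        return lo | ((uint64_t) hi << 32);
    }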
@@ -2082,11 +2134,49 @@ void cpu_WRMSR()
case 0x83:
ecx83_msr = EAX | ((uint64_t)EDX << 32);
break;
case 0xC0000080:
temp = EAX | ((uint64_t)EDX << 32);
if (temp & ~1ULL)
x86gpf(NULL, 0);
else
amd_efer = temp;
break;
case 0xC0000082:
amd_whcr = EAX | ((uint64_t)EDX << 32);
break;
default:
x86gpf(NULL, 0);
break;
}
break;
case CPU_K6_2:
switch (ECX)
{
case 0x0e:
msr.tr12 = EAX & 0x228;
break;
case 0x10:
tsc = EAX | ((uint64_t)EDX << 32);
break;
case 0x83:
ecx83_msr = EAX | ((uint64_t)EDX << 32);
break;
case 0xC0000080:
temp = EAX | ((uint64_t)EDX << 32);
if (temp & ~1ULL)
x86gpf(NULL, 0);
else
amd_efer = temp;
break;
case 0xC0000081:
star = EAX | ((uint64_t)EDX << 32);
break;
case 0xC0000084:
sfmask = EAX | ((uint64_t)EDX << 32);
break;
case 0xC0000082:
amd_whcr = EAX | ((uint64_t)EDX << 32);
break;
default:
x86gpf(NULL, 0);
break;
}
break;
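
Both EFER write paths above accept only bit 0 of the value (on the K6 family this is the SYSCALL enable bit, SCE) and raise a general protection fault for anything else. That check, pulled out into an illustrative helper whose name is not from the commit:

    #include <stdint.h>

    /* Mirrors the cpu_WRMSR() EFER check: any bit other than bit 0 is rejected. */
    static int amd_efer_write_ok(uint64_t val)
    {
        return (val & ~1ULL) == 0;
    }

cpu_WRMSR() inlines this test and calls x86gpf(NULL, 0) when it fails, leaving amd_efer unchanged.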