diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 82c9b6389d..76546c6651 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1090,7 +1090,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, } } else { /* I/O or ROMD */ - iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; + iotlb = xlat; /* * Writes to romd devices must go through MMIO to enable write. * Reads to romd devices go through the ram_ptr found above, @@ -1141,10 +1141,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, /* * When memory region is ram, iotlb contains a TARGET_PAGE_BITS * aligned ram_addr_t of the page base of the target RAM. - * Otherwise, iotlb contains - * - a physical section number in the lower TARGET_PAGE_BITS - * - the offset within section->mr of the page base (I/O, ROMD) with the - * TARGET_PAGE_BITS masked off. + * Otherwise, iotlb contains a TARGET_PAGE_BITS aligned + * offset within section->mr of the page base (I/O, ROMD) + * * We subtract addr_page (which is page aligned and thus won't * disturb the low bits) to give an offset which can be added to the * (non-page-aligned) vaddr of the eventual memory access to get @@ -1154,7 +1153,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, */ desc->fulltlb[index] = *full; full = &desc->fulltlb[index]; - full->xlat_section = iotlb - addr_page; + full->xlat_offset = iotlb - addr_page; + full->section = section; full->phys_addr = paddr_page; /* Now calculate the new entry */ @@ -1276,8 +1276,8 @@ io_prepare(hwaddr *out_offset, CPUState *cpu, CPUTLBEntryFull *full, MemoryRegionSection *section; hwaddr mr_offset; - section = iotlb_to_section(cpu, full->xlat_section, attrs); - mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; + section = full->section; + mr_offset = full->xlat_offset + addr; cpu->mem_io_pc = retaddr; if (!cpu->neg.can_do_io) { cpu_io_recompile(cpu, retaddr); @@ -1336,7 +1336,7 @@ static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index, static void 
notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, CPUTLBEntryFull *full, uintptr_t retaddr) { - ram_addr_t ram_addr = mem_vaddr + full->xlat_section; + ram_addr_t ram_addr = mem_vaddr + full->xlat_offset; trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); @@ -1593,9 +1593,7 @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx, /* We must have an iotlb entry for MMIO */ if (tlb_addr & TLB_MMIO) { - MemoryRegionSection *section = - iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK, - full->attrs); + MemoryRegionSection *section = full->section; data->is_io = true; data->mr = section->mr; } else { diff --git a/include/accel/tcg/iommu.h b/include/accel/tcg/iommu.h index 90cfd6c0ed..547f8ea0ef 100644 --- a/include/accel/tcg/iommu.h +++ b/include/accel/tcg/iommu.h @@ -14,18 +14,6 @@ #include "exec/hwaddr.h" #include "exec/memattrs.h" -/** - * iotlb_to_section: - * @cpu: CPU performing the access - * @index: TCG CPU IOTLB entry - * - * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that - * it refers to. @index will have been initially created and returned - * by memory_region_section_get_iotlb(). - */ -MemoryRegionSection *iotlb_to_section(CPUState *cpu, - hwaddr index, MemTxAttrs attrs); - MemoryRegionSection *address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, @@ -34,8 +22,5 @@ MemoryRegionSection *address_space_translate_for_iotlb(CPUState *cpu, MemTxAttrs attrs, int *prot); -hwaddr memory_region_section_get_iotlb(CPUState *cpu, - MemoryRegionSection *section); - #endif diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h index 0d1d46429c..3a9603a696 100644 --- a/include/exec/cputlb.h +++ b/include/exec/cputlb.h @@ -44,8 +44,8 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length); * @full: the details of the tlb entry * * Add an entry to @cpu tlb index @mmu_idx. 
All of the fields of - * @full must be filled, except for xlat_section, and constitute - * the complete description of the translated page. + * @full must be filled, except for xlat_offset & section, and + * constitute the complete description of the translated page. * * This is generally called by the target tlb_fill function after * having performed a successful page table walk to find the physical diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 61da2ea433..98678704a6 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -219,15 +219,16 @@ typedef uint32_t MMUIdxMap; */ struct CPUTLBEntryFull { /* - * @xlat_section contains: - * - in the lower TARGET_PAGE_BITS, a physical section number - * - with the lower TARGET_PAGE_BITS masked off, an offset which - * must be added to the virtual address to obtain: - * + the ram_addr_t of the target RAM (if the physical section - * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) - * + the offset within the target MemoryRegion (otherwise) + * @xlat_offset: TARGET_PAGE_BITS aligned offset which must be added to + * the virtual address to obtain: + * + the ram_addr_t of the page base of the target RAM + * (if the memory region is RAM) + * + the offset within the target MemoryRegion (otherwise: I/O, ROMD) */ - hwaddr xlat_section; + hwaddr xlat_offset; + + /* @section: the MemoryRegionSection this page maps to. 
*/ + MemoryRegionSection *section; /* * @phys_addr contains the physical address in the address space diff --git a/system/physmem.c b/system/physmem.c index b0311f4531..d17596a77f 100644 --- a/system/physmem.c +++ b/system/physmem.c @@ -747,31 +747,6 @@ translate_fail: return &d->map.sections[PHYS_SECTION_UNASSIGNED]; } -MemoryRegionSection *iotlb_to_section(CPUState *cpu, - hwaddr index, MemTxAttrs attrs) -{ - int asidx = cpu_asidx_from_attrs(cpu, attrs); - CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; - AddressSpaceDispatch *d = address_space_to_dispatch(cpuas->as); - int section_index = index & ~TARGET_PAGE_MASK; - MemoryRegionSection *ret; - - assert(section_index < d->map.sections_nb); - ret = d->map.sections + section_index; - assert(ret->mr); - assert(ret->mr->ops); - - return ret; -} - -/* Called from RCU critical section */ -hwaddr memory_region_section_get_iotlb(CPUState *cpu, - MemoryRegionSection *section) -{ - AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); - return section - d->map.sections; -} - #endif /* CONFIG_TCG */ void cpu_address_space_init(CPUState *cpu, int asidx,