accel/tcg: Add clear_flags argument to page_set_flags

Expand the interface of page_set_flags to separate the set of
flags to be set from the set of flags to be cleared.

This allows us to replace PAGE_RESET with the PAGE_VALID
bit within clear_flags.

Replace PAGE_TARGET_STICKY with an inverted TARGET_PAGE_NOTSTICKY;
aarch64-linux-user is the only user.
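
For example, a new mapping that was previously flagged with PAGE_RESET,

    page_set_flags(start, last, prot | PAGE_VALID | PAGE_RESET);

is now expressed by putting PAGE_VALID into the clear set,

    page_set_flags(start, last, prot | PAGE_VALID, PAGE_VALID);

while a permission change such as target_mprotect clears only the bits
that may go away:

    page_set_flags(start, last, page_flags, PAGE_RWX | TARGET_PAGE_NOTSTICKY);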

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson
2025-09-23 18:16:58 -07:00
parent f3f2ad1193
commit f55fc1c092
10 changed files with 71 additions and 117 deletions

View File

@@ -269,48 +269,6 @@ static void pageflags_create(vaddr start, vaddr last, int flags)
interval_tree_insert(&p->itree, &pageflags_root);
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(vaddr start, vaddr last)
{
bool inval_tb = false;
while (true) {
PageFlagsNode *p = pageflags_find(start, last);
vaddr p_last;
if (!p) {
break;
}
if (p->flags & PAGE_EXEC) {
inval_tb = true;
}
interval_tree_remove(&p->itree, &pageflags_root);
p_last = p->itree.last;
if (p->itree.start < start) {
/* Truncate the node from the end, or split out the middle. */
p->itree.last = start - 1;
interval_tree_insert(&p->itree, &pageflags_root);
if (last < p_last) {
pageflags_create(last + 1, p_last, p->flags);
break;
}
} else if (p_last <= last) {
/* Range completely covers node -- remove it. */
g_free_rcu(p, rcu);
} else {
/* Truncate the node from the start. */
p->itree.start = last + 1;
interval_tree_insert(&p->itree, &pageflags_root);
break;
}
}
return inval_tb;
}
/*
* A subroutine of page_set_flags: nothing overlaps [start,last],
* but check adjacent mappings and maybe merge into a single range.
@@ -356,15 +314,6 @@ static void pageflags_create_merge(vaddr start, vaddr last, int flags)
}
}
/*
* Allow the target to decide if PAGE_TARGET_[12] may be reset.
* By default, they are not kept.
*/
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY 0
#endif
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(vaddr start, vaddr last,
int set_flags, int clear_flags)
@@ -377,7 +326,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
restart:
p = pageflags_find(start, last);
if (!p) {
if (set_flags) {
if (set_flags & PAGE_VALID) {
pageflags_create_merge(start, last, set_flags);
}
goto done;
@@ -391,11 +340,12 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
/*
* Need to flush if an overlapping executable region
* removes exec, or adds write.
* removes exec, adds write, or is a new mapping.
*/
if ((p_flags & PAGE_EXEC)
&& (!(merge_flags & PAGE_EXEC)
|| (merge_flags & ~p_flags & PAGE_WRITE))) {
|| (merge_flags & ~p_flags & PAGE_WRITE)
|| (clear_flags & PAGE_VALID))) {
inval_tb = true;
}
@@ -404,7 +354,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
* attempting to merge with adjacent regions.
*/
if (start == p_start && last == p_last) {
if (merge_flags) {
if (merge_flags & PAGE_VALID) {
p->flags = merge_flags;
} else {
interval_tree_remove(&p->itree, &pageflags_root);
@@ -424,12 +374,12 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
interval_tree_insert(&p->itree, &pageflags_root);
if (last < p_last) {
if (merge_flags) {
if (merge_flags & PAGE_VALID) {
pageflags_create(start, last, merge_flags);
}
pageflags_create(last + 1, p_last, p_flags);
} else {
if (merge_flags) {
if (merge_flags & PAGE_VALID) {
pageflags_create(start, p_last, merge_flags);
}
if (p_last < last) {
@@ -438,18 +388,18 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
}
}
} else {
if (start < p_start && set_flags) {
if (start < p_start && (set_flags & PAGE_VALID)) {
pageflags_create(start, p_start - 1, set_flags);
}
if (last < p_last) {
interval_tree_remove(&p->itree, &pageflags_root);
p->itree.start = last + 1;
interval_tree_insert(&p->itree, &pageflags_root);
if (merge_flags) {
if (merge_flags & PAGE_VALID) {
pageflags_create(start, last, merge_flags);
}
} else {
if (merge_flags) {
if (merge_flags & PAGE_VALID) {
p->flags = merge_flags;
} else {
interval_tree_remove(&p->itree, &pageflags_root);
@@ -497,7 +447,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
g_free_rcu(p, rcu);
goto restart;
}
if (set_flags) {
if (set_flags & PAGE_VALID) {
pageflags_create(start, last, set_flags);
}
@@ -505,42 +455,36 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
return inval_tb;
}
void page_set_flags(vaddr start, vaddr last, int flags)
void page_set_flags(vaddr start, vaddr last, int set_flags, int clear_flags)
{
bool reset = false;
bool inval_tb = false;
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
/*
* This function should never be called with addresses outside the
* guest address space. If this assert fires, it probably indicates
* a missing call to h2g_valid.
*/
assert(start <= last);
assert(last <= guest_addr_max);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
start &= TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK;
if (!(flags & PAGE_VALID)) {
flags = 0;
} else {
reset = flags & PAGE_RESET;
flags &= ~PAGE_RESET;
if (flags & PAGE_WRITE) {
flags |= PAGE_WRITE_ORG;
}
if (set_flags & PAGE_WRITE) {
set_flags |= PAGE_WRITE_ORG;
}
if (clear_flags & PAGE_WRITE) {
clear_flags |= PAGE_WRITE_ORG;
}
if (!flags || reset) {
if (clear_flags & PAGE_VALID) {
page_reset_target_data(start, last);
inval_tb |= pageflags_unset(start, last);
clear_flags = -1;
} else {
/* Only set PAGE_ANON with new mappings. */
assert(!(set_flags & PAGE_ANON));
}
if (flags) {
inval_tb |= pageflags_set_clear(start, last, flags,
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
if (pageflags_set_clear(start, last, set_flags, clear_flags)) {
tb_invalidate_phys_range(NULL, start, last);
}
}

View File

@@ -390,8 +390,9 @@ static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
raddr = h2g(host_raddr);
page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
PAGE_VALID | PAGE_RESET | PAGE_READ |
(shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
PAGE_VALID | PAGE_READ |
(shmflg & SHM_RDONLY ? 0 : PAGE_WRITE),
PAGE_VALID);
for (int i = 0; i < N_BSD_SHM_REGIONS; i++) {
if (bsd_shm_regions[i].start == 0) {
@@ -428,7 +429,7 @@ static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
abi_ulong size = bsd_shm_regions[i].size;
bsd_shm_regions[i].start = 0;
page_set_flags(shmaddr, shmaddr + size - 1, 0);
page_set_flags(shmaddr, shmaddr + size - 1, 0, PAGE_VALID);
mmap_reserve(shmaddr, size);
}
}

View File

@@ -122,7 +122,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
if (ret != 0)
goto error;
}
page_set_flags(start, start + len - 1, prot | PAGE_VALID);
page_set_flags(start, start + len - 1, prot, PAGE_RWX);
mmap_unlock();
return 0;
error:
@@ -652,7 +652,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
}
}
the_end1:
page_set_flags(start, start + len - 1, prot | PAGE_VALID);
page_set_flags(start, start + len - 1, prot | PAGE_VALID, PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
@@ -763,7 +763,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
}
if (ret == 0) {
page_set_flags(start, start + len - 1, 0);
page_set_flags(start, start + len - 1, 0, PAGE_VALID);
}
mmap_unlock();
return ret;

View File

@@ -23,8 +23,11 @@
* Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
*/
#define PAGE_WRITE_INV 0x0020
/* For use with page_set_flags: page is being replaced; target_data cleared. */
#define PAGE_RESET 0x0040
/*
* For linux-user, indicates that the page is mapped with the same semantics
* in both guest and host.
*/
#define PAGE_PASSTHROUGH 0x0040
/* For linux-user, indicates that the page is MAP_ANON. */
#define PAGE_ANON 0x0080
@@ -32,10 +35,4 @@
#define PAGE_TARGET_1 0x0200
#define PAGE_TARGET_2 0x0400
/*
* For linux-user, indicates that the page is mapped with the same semantics
* in both guest and host.
*/
#define PAGE_PASSTHROUGH 0x0800
#endif

View File

@@ -23,14 +23,19 @@ int page_get_flags(vaddr address);
* page_set_flags:
* @start: first byte of range
* @last: last byte of range
* @flags: flags to set
* @set_flags: flags to set
* @clr_flags: flags to clear
* Context: holding mmap lock
*
* Modify the flags of a page and invalidate the code if necessary.
* The flag PAGE_WRITE_ORG is positioned automatically depending
* on PAGE_WRITE. The mmap_lock should already be held.
*
* For each page, flags = (flags & ~clr_flags) | set_flags.
* If clr_flags includes PAGE_VALID, this indicates a new mapping
* and page_reset_target_data will be called as well.
*/
void page_set_flags(vaddr start, vaddr last, int flags);
void page_set_flags(vaddr start, vaddr last, int set_flags, int clr_flags);
void page_reset_target_data(vaddr start, vaddr last);

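As an illustration of the rule above (a sketch only, using the existing QEMU
page flag definitions), a permission change on a page currently mapped
read/write works out as follows; note that page_set_flags() widens PAGE_WRITE
to include PAGE_WRITE_ORG in both arguments:

    /* Sketch: page_set_flags(start, last, PAGE_READ | PAGE_EXEC, PAGE_RWX) */
    int old_flags   = PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_WRITE_ORG;
    int set_flags   = PAGE_READ | PAGE_EXEC;
    int clear_flags = PAGE_RWX | PAGE_WRITE_ORG;   /* PAGE_RWX, widened */
    int new_flags   = (old_flags & ~clear_flags) | set_flags;
    /* new_flags == PAGE_VALID | PAGE_READ | PAGE_EXEC; PAGE_ANON,
     * PAGE_PASSTHROUGH and target-specific bits would be untouched. */
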
View File

@@ -243,7 +243,7 @@ bool init_guest_commpage(void)
}
page_set_flags(commpage, commpage | (host_page_size - 1),
PAGE_READ | PAGE_EXEC | PAGE_VALID);
PAGE_READ | PAGE_EXEC | PAGE_VALID, PAGE_VALID);
return true;
}

View File

@@ -42,6 +42,6 @@ bool init_guest_commpage(void)
* Special case the entry points during translation (see do_page_zero).
*/
page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
PAGE_EXEC | PAGE_VALID);
PAGE_EXEC | PAGE_VALID, PAGE_VALID);
return true;
}

View File

@@ -165,6 +165,13 @@ static int target_to_host_prot(int prot)
(prot & PROT_EXEC ? PROT_READ : 0);
}
/* Target bits to be cleared by mprotect if not present in target_prot. */
#ifdef TARGET_AARCH64
#define TARGET_PAGE_NOTSTICKY PAGE_BTI
#else
#define TARGET_PAGE_NOTSTICKY 0
#endif
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
@@ -262,7 +269,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
}
}
page_set_flags(start, last, page_flags);
page_set_flags(start, last, page_flags, PAGE_RWX | TARGET_PAGE_NOTSTICKY);
ret = 0;
error:
@@ -561,17 +568,17 @@ static abi_long mmap_end(abi_ulong start, abi_ulong last,
if (flags & MAP_ANONYMOUS) {
page_flags |= PAGE_ANON;
}
page_flags |= PAGE_RESET;
if (passthrough_start > passthrough_last) {
page_set_flags(start, last, page_flags);
page_set_flags(start, last, page_flags, PAGE_VALID);
} else {
if (start < passthrough_start) {
page_set_flags(start, passthrough_start - 1, page_flags);
page_set_flags(start, passthrough_start - 1,
page_flags, PAGE_VALID);
}
page_set_flags(passthrough_start, passthrough_last,
page_flags | PAGE_PASSTHROUGH);
page_flags | PAGE_PASSTHROUGH, PAGE_VALID);
if (passthrough_last < last) {
page_set_flags(passthrough_last + 1, last, page_flags);
page_set_flags(passthrough_last + 1, last, page_flags, PAGE_VALID);
}
}
shm_region_rm_complete(start, last);
@@ -1088,7 +1095,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
mmap_lock();
ret = mmap_reserve_or_unmap(start, len);
if (likely(ret == 0)) {
page_set_flags(start, start + len - 1, 0);
page_set_flags(start, start + len - 1, 0, PAGE_VALID);
shm_region_rm_complete(start, start + len - 1);
}
mmap_unlock();
@@ -1179,10 +1186,10 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
} else {
new_addr = h2g(host_addr);
prot = page_get_flags(old_addr);
page_set_flags(old_addr, old_addr + old_size - 1, 0);
page_set_flags(old_addr, old_addr + old_size - 1, 0, PAGE_VALID);
shm_region_rm_complete(old_addr, old_addr + old_size - 1);
page_set_flags(new_addr, new_addr + new_size - 1,
prot | PAGE_VALID | PAGE_RESET);
prot | PAGE_VALID, PAGE_VALID);
shm_region_rm_complete(new_addr, new_addr + new_size - 1);
}
mmap_unlock();
@@ -1428,9 +1435,10 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
last = shmaddr + m_len - 1;
page_set_flags(shmaddr, last,
PAGE_VALID | PAGE_RESET | PAGE_READ |
PAGE_VALID | PAGE_READ |
(shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
(shmflg & SHM_EXEC ? PAGE_EXEC : 0));
(shmflg & SHM_EXEC ? PAGE_EXEC : 0),
PAGE_VALID);
shm_region_rm_complete(shmaddr, last);
shm_region_add(shmaddr, last);
@@ -1471,7 +1479,7 @@ abi_long target_shmdt(abi_ulong shmaddr)
if (rv == 0) {
abi_ulong size = last - shmaddr + 1;
page_set_flags(shmaddr, last, 0);
page_set_flags(shmaddr, last, 0, PAGE_VALID);
shm_region_rm_complete(shmaddr, last);
mmap_reserve_or_unmap(shmaddr, size);
}

View File

@@ -37,7 +37,7 @@ bool init_guest_commpage(void)
}
page_set_flags(TARGET_VSYSCALL_PAGE,
TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
PAGE_EXEC | PAGE_VALID);
PAGE_EXEC | PAGE_VALID, PAGE_VALID);
return true;
}

View File

@@ -2642,7 +2642,6 @@ extern const uint64_t pred_esz_masks[5];
*/
#define PAGE_BTI PAGE_TARGET_1
#define PAGE_MTE PAGE_TARGET_2
#define PAGE_TARGET_STICKY PAGE_MTE
/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
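
With PAGE_TARGET_STICKY gone, the aarch64 behaviour is stated the other way
around: target_mprotect clears TARGET_PAGE_NOTSTICKY (PAGE_BTI) unless the new
protection requests it again, while PAGE_MTE survives simply by not being in
the clear set.  A minimal sketch, assuming mprotect(PROT_READ) on a page that
currently has both target bits set:

    /* Sketch only: clear set as used by target_mprotect on aarch64. */
    int old_flags   = PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_WRITE_ORG
                      | PAGE_BTI | PAGE_MTE;
    int set_flags   = PAGE_READ;
    int clear_flags = PAGE_RWX | PAGE_WRITE_ORG    /* widened inside the call */
                      | TARGET_PAGE_NOTSTICKY;
    int new_flags   = (old_flags & ~clear_flags) | set_flags;
    /* new_flags == PAGE_VALID | PAGE_READ | PAGE_MTE: BTI is dropped and MTE
     * is kept, matching the old PAGE_TARGET_STICKY == PAGE_MTE behaviour. */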