linux-user: Explicitly untag memory management syscalls
We define target_mmap et al to operate on untagged addresses, so that
they can be used from the binary loaders. Explicitly call
cpu_untagged_addr at the munmap, mprotect, and mremap syscall entry points.
Add a few comments for the syscalls that are exempted by the
kernel's tagged-address-abi.rst.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210212184902.1251044-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index e7fd99f..d5badd7 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -894,6 +894,8 @@
abi_long mapped_addr;
abi_ulong new_alloc_size;
+ /* brk pointers are always untagged */
+
DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
if (!new_brk) {
@@ -4599,6 +4601,8 @@
int i,ret;
abi_ulong shmlba;
+ /* shmat pointers are always untagged */
+
/* find out the length of the shared memory segment */
ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
if (is_error(ret)) {
@@ -4666,6 +4670,8 @@
int i;
abi_long rv;
+ /* shmdt pointers are always untagged */
+
mmap_lock();
for (i = 0; i < N_SHM_REGIONS; ++i) {
@@ -9703,6 +9709,7 @@
v5, v6));
}
#else
+ /* mmap pointers are always untagged */
ret = get_errno(target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
arg5,
@@ -9721,8 +9728,10 @@
return get_errno(ret);
#endif
case TARGET_NR_munmap:
+ arg1 = cpu_untagged_addr(cpu, arg1);
return get_errno(target_munmap(arg1, arg2));
case TARGET_NR_mprotect:
+ arg1 = cpu_untagged_addr(cpu, arg1);
{
TaskState *ts = cpu->opaque;
/* Special hack to detect libc making the stack executable. */
@@ -9737,6 +9746,8 @@
return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
case TARGET_NR_mremap:
+ arg1 = cpu_untagged_addr(cpu, arg1);
+ /* mremap new_addr (arg5) is always untagged */
return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
/* ??? msync/mlock/munlock are broken for softmmu. */