| /* |
| * AArch64 specific prctl functions for linux-user |
| * |
| * SPDX-License-Identifier: GPL-2.0-or-later |
| */ |
| #ifndef AARCH64_TARGET_PRCTL_H |
| #define AARCH64_TARGET_PRCTL_H |
| |
| #include "qemu/units.h" |
| #include "target/arm/cpu-features.h" |
| #include "mte_user_helper.h" |
| #include "gcs-internal.h" |
| |
| static abi_long do_prctl_sve_get_vl(CPUArchState *env) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| if (cpu_isar_feature(aa64_sve, cpu)) { |
| /* PSTATE.SM is always unset on syscall entry. */ |
| return sve_vq(env) * 16; |
| } |
| return -TARGET_EINVAL; |
| } |
| #define do_prctl_sve_get_vl do_prctl_sve_get_vl |
| |
| static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
| { |
| /* |
| * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT. |
| * Note the kernel definition of sve_vl_valid allows for VQ=512, |
| * i.e. VL=8192, even though the current architectural maximum is VQ=16. |
| */ |
| if (cpu_isar_feature(aa64_sve, env_archcpu(env)) |
| && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { |
| uint32_t vq, old_vq; |
| |
| /* PSTATE.SM is always unset on syscall entry. */ |
| old_vq = sve_vq(env); |
| |
| /* |
| * Bound the value of arg2, so that we know that it fits into |
| * the 4-bit field in ZCR_EL1. Rely on the hflags rebuild to |
| * sort out the length supported by the cpu. |
| */ |
| vq = MAX(arg2 / 16, 1); |
| vq = MIN(vq, ARM_MAX_VQ); |
| env->vfp.zcr_el[1] = vq - 1; |
| arm_rebuild_hflags(env); |
| |
| vq = sve_vq(env); |
| if (vq < old_vq) { |
| aarch64_sve_narrow_vq(env, vq); |
| } |
| return vq * 16; |
| } |
| return -TARGET_EINVAL; |
| } |
| #define do_prctl_sve_set_vl do_prctl_sve_set_vl |
| |
| static abi_long do_prctl_sme_get_vl(CPUArchState *env) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| if (cpu_isar_feature(aa64_sme, cpu)) { |
| return sme_vq(env) * 16; |
| } |
| return -TARGET_EINVAL; |
| } |
| #define do_prctl_sme_get_vl do_prctl_sme_get_vl |
| |
/*
 * PR_SME_SET_VL: set the SME streaming vector length.
 * Returns the new streaming VL in bytes, or TARGET_EINVAL on a
 * malformed request or when the cpu lacks SME.
 */
static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        int vq, old_vq;

        old_vq = sme_vq(env);

        /*
         * Bound the value of vq, so that we know that it fits into
         * the 4-bit field in SMCR_EL1.  Because PSTATE.SM is cleared
         * on syscall entry, we are not modifying the current SVE
         * vector length.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, 16);
        env->vfp.smcr_el[1] =
            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);

        /* Delay rebuilding hflags until we know if ZA must change. */
        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;

        if (vq != old_vq) {
            /*
             * PSTATE.ZA state is cleared on any change to SVL.
             * The SMCR_EL1 write above did not change the active VL
             * (PSTATE.SM was cleared on syscall entry), so the hflags
             * rebuild here is needed only for the SVCR.ZA change.
             */
            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
            arm_rebuild_hflags(env);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_set_vl do_prctl_sme_set_vl
| |
| static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| |
| if (cpu_isar_feature(aa64_pauth, cpu)) { |
| int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY | |
| PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY); |
| int ret = 0; |
| Error *err = NULL; |
| |
| if (arg2 == 0) { |
| arg2 = all; |
| } else if (arg2 & ~all) { |
| return -TARGET_EINVAL; |
| } |
| if (arg2 & PR_PAC_APIAKEY) { |
| ret |= qemu_guest_getrandom(&env->keys.apia, |
| sizeof(ARMPACKey), &err); |
| } |
| if (arg2 & PR_PAC_APIBKEY) { |
| ret |= qemu_guest_getrandom(&env->keys.apib, |
| sizeof(ARMPACKey), &err); |
| } |
| if (arg2 & PR_PAC_APDAKEY) { |
| ret |= qemu_guest_getrandom(&env->keys.apda, |
| sizeof(ARMPACKey), &err); |
| } |
| if (arg2 & PR_PAC_APDBKEY) { |
| ret |= qemu_guest_getrandom(&env->keys.apdb, |
| sizeof(ARMPACKey), &err); |
| } |
| if (arg2 & PR_PAC_APGAKEY) { |
| ret |= qemu_guest_getrandom(&env->keys.apga, |
| sizeof(ARMPACKey), &err); |
| } |
| if (ret != 0) { |
| /* |
| * Some unknown failure in the crypto. The best |
| * we can do is log it and fail the syscall. |
| * The real syscall cannot fail this way. |
| */ |
| qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s", |
| error_get_pretty(err)); |
| error_free(err); |
| return -TARGET_EIO; |
| } |
| return 0; |
| } |
| return -TARGET_EINVAL; |
| } |
| #define do_prctl_reset_keys do_prctl_reset_keys |
| |
| static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2) |
| { |
| abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE; |
| ARMCPU *cpu = env_archcpu(env); |
| |
| if (cpu_isar_feature(aa64_mte, cpu)) { |
| valid_mask |= PR_MTE_TCF_MASK; |
| valid_mask |= PR_MTE_TAG_MASK; |
| } |
| |
| if (arg2 & ~valid_mask) { |
| return -TARGET_EINVAL; |
| } |
| env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE; |
| |
| if (cpu_isar_feature(aa64_mte, cpu)) { |
| arm_set_mte_tcf0(env, arg2); |
| |
| /* |
| * Write PR_MTE_TAG to GCR_EL1[Exclude]. |
| * Note that the syscall uses an include mask, |
| * and hardware uses an exclude mask -- invert. |
| */ |
| env->cp15.gcr_el1 = |
| deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT); |
| arm_rebuild_hflags(env); |
| } |
| return 0; |
| } |
| #define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl |
| |
| static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| abi_long ret = 0; |
| |
| if (env->tagged_addr_enable) { |
| ret |= PR_TAGGED_ADDR_ENABLE; |
| } |
| if (cpu_isar_feature(aa64_mte, cpu)) { |
| /* See do_prctl_set_tagged_addr_ctrl. */ |
| ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT; |
| ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1); |
| } |
| return ret; |
| } |
| #define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl |
| |
| static abi_long do_prctl_get_shadow_stack_status(CPUArchState *env, |
| abi_long arg2) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| |
| if (!cpu_isar_feature(aa64_gcs, cpu)) { |
| return -TARGET_EINVAL; |
| } |
| return put_user_ual(gcs_get_el0_mode(env), arg2); |
| } |
| #define do_prctl_get_shadow_stack_status do_prctl_get_shadow_stack_status |
| |
| static abi_long gcs_alloc(abi_ulong hint, abi_ulong size) |
| { |
| /* |
| * Without softmmu, we cannot protect GCS memory properly. |
| * Make do with normal read/write permissions. This at least allows |
| * emulation of correct programs which don't access the gcs stack |
| * with normal instructions. |
| */ |
| return target_mmap(hint, size, PROT_READ | PROT_WRITE, |
| MAP_PRIVATE | MAP_ANONYMOUS | |
| (hint ? MAP_FIXED_NOREPLACE : 0), -1, 0); |
| } |
| |
| static abi_ulong gcs_new_stack(TaskState *ts) |
| { |
| /* Use guest_stack_size as a proxy for RLIMIT_STACK. */ |
| abi_ulong size = MIN(MAX(guest_stack_size / 2, TARGET_PAGE_SIZE), 2 * GiB); |
| abi_ulong base = gcs_alloc(0, size); |
| |
| if (base == -1) { |
| return -1; |
| } |
| |
| ts->gcs_base = base; |
| ts->gcs_size = size; |
| return base + size - 8; |
| } |
| |
| static abi_long do_prctl_set_shadow_stack_status(CPUArchState *env, |
| abi_long new_mode) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| TaskState *ts = get_task_state(env_cpu(env)); |
| abi_long cur_mode; |
| |
| if (!cpu_isar_feature(aa64_gcs, cpu)) { |
| return -TARGET_EINVAL; |
| } |
| if (new_mode & ~(PR_SHADOW_STACK_ENABLE | |
| PR_SHADOW_STACK_WRITE | |
| PR_SHADOW_STACK_PUSH)) { |
| return -TARGET_EINVAL; |
| } |
| |
| cur_mode = gcs_get_el0_mode(env); |
| if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) { |
| return -TARGET_EBUSY; |
| } |
| |
| if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) { |
| abi_long gcspr; |
| |
| if (ts->gcs_base || env->cp15.gcspr_el[0]) { |
| return -EINVAL; |
| } |
| gcspr = gcs_new_stack(ts); |
| if (gcspr == -1) { |
| return -TARGET_ENOMEM; |
| } |
| env->cp15.gcspr_el[0] = gcspr; |
| } |
| |
| gcs_set_el0_mode(env, new_mode); |
| arm_rebuild_hflags(env); |
| return 0; |
| } |
| #define do_prctl_set_shadow_stack_status do_prctl_set_shadow_stack_status |
| |
| static abi_long do_prctl_lock_shadow_stack_status(CPUArchState *env, |
| abi_long arg2) |
| { |
| ARMCPU *cpu = env_archcpu(env); |
| TaskState *ts = get_task_state(env_cpu(env)); |
| |
| if (!cpu_isar_feature(aa64_gcs, cpu)) { |
| return -EINVAL; |
| } |
| ts->gcs_el0_locked |= arg2; |
| return 0; |
| } |
| #define do_prctl_lock_shadow_stack_status do_prctl_lock_shadow_stack_status |
| |
| #endif /* AARCH64_TARGET_PRCTL_H */ |