/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
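    /* The ID registers fetched below are MIDR (cp15 c0, c0, 0),
     * ID_PFR0 (c0, c1, 0) and ID_ISAR0 (c0, c2, 0), plus MVFR1 from
     * the VFP register group.
     */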
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

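    /* ID_ISAR0[27:24] is the Divide_instrs field: 1 means SDIV/UDIV are
     * implemented in the Thumb instruction set only, 2 means in both
     * the ARM and Thumb instruction sets.
     */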
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

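    /* ID_PFR0[15:12] indicates ThumbEE support; the MVFR1 fields tested
     * below advertise half-precision, Advanced SIMD and fused multiply-add
     * support respectively.
     */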
    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
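/* KVM_REG_ARM_TIMER_CNT is only written back on a full state sync,
 * presumably so that routine runtime syncs do not disturb the guest's
 * view of elapsed time.
 */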
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

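/* MPIDR lives at cp15, opc1 0, CRn c0, CRm c0, opc2 5 */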
#define ARM_CPU_ID_MPIDR       0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    return kvm_arm_init_cpreg_list(cpu);
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }

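/* Map each KVM "core" register and VFP system register onto the
 * corresponding field in CPUARMState.
 */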
static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
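    /* d0..d31 have consecutive KVM register IDs starting at
     * KVM_REG_ARM_VFP, so we can simply increment r.id each iteration.
     */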
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu, level)) {
        return EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}