/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <dirent.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/vfs.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
#include "mmu-hash64.h"

#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

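/* Capabilities of the host kernel, mostly probed via kvm_check_extension()
 * in kvm_arch_init() below; cap_papr is only set later by kvmppc_set_papr(). */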
static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;

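/* Instruction opcode KVM uses for software breakpoints, read from the
 * kernel via KVM_REG_PPC_DEBUG_INST in kvm_arch_init_vcpu(). */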
static uint32_t debug_inst_opcode;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

static void kvm_kick_cpu(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    qemu_cpu_kick(CPU(cpu));
}

static int kvm_ppc_register_host_cpu_type(void);

int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = false;
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr() */
    cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type();

    return 0;
}

static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we potentially confuse users into thinking they
           can run BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}


#if defined(TARGET_PPC64)
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so we
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
     *   KVM which only supports 4K and 16M pages, but supports them
     *   regardless of the backing store characteristics. We also don't
     *   support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows support for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertise 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        /* No flags */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /* Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 */
        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}

static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(cpu, info);
}

static long getrampagesize(void)
{
    struct statfs fs;
    int ret;

    if (!mem_path) {
        /* guest RAM is backed by normal anonymous pages */
        return getpagesize();
    }

    do {
        ret = statfs(mem_path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                strerror(errno));
        exit(1);
    }

#define HUGETLBFS_MAGIC       0x958458f6

    if (fs.f_type != HUGETLBFS_MAGIC) {
        /* Explicit mempath, but it's ordinary pages */
        return getpagesize();
    }

    /* It's hugepage, return the huge page size */
    return fs.f_bsize;
}

static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
{
    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
        return true;
    }

    return (1ul << shift) <= rampgsize;
}
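
/* Example: with 4K backing pages (rampgsize == 4096), a 16M page size
 * (shift == 24) fails the check above because (1ul << 24) > 4096, so
 * HV KVM must not advertise 16M pages to the guest. */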

static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    CPUPPCState *env = &cpu->env;
    long rampagesize;
    int iq, ik, jq, jk;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Collect MMU info from kernel if not already */
    if (!has_smmu_info) {
        kvm_get_smmu_info(cpu, &smmu_info);
        has_smmu_info = true;
    }

    rampagesize = getrampagesize();

    /* Convert to QEMU form */
    memset(&env->sps, 0, sizeof(env->sps));

    /*
     * XXX This loop should be an entry wide AND of the capabilities that
     *     the selected CPU has with the capabilities that KVM supports.
     */
    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];

        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                     ksps->enc[jk].page_shift)) {
                continue;
            }
            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }
    env->slb_nr = smmu_info.slb_size;
    if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
}
#else /* defined (TARGET_PPC64) */

static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}

#endif /* !defined (TARGET_PPC64) */

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
}

/* e500 supports 2 h/w breakpoints and 2 watchpoints.
 * book3s supports only 1 watchpoint, so an array size
 * of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* By default no hardware breakpoints or watchpoints are supported */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;

static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
        return;
    }
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cpu);

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        return ret;
    }

    idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);

    /* Some targets support access to KVM's guest TLB. */
    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        ret = kvm_booke206_tlb_init(cpu);
        break;
    default:
        break;
    }

    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}

static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

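/* Read or write a single SPR through KVM's ONE_REG interface.  The
 * register width is encoded in the ONE_REG id itself (its
 * KVM_REG_SIZE_MASK bits); only 32-bit and 64-bit SPRs are handled.
 * Typical use, as done for HIOR below:
 *     kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR); */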
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
    } else {
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            env->spr[spr] = val.u32;
            break;

        case KVM_REG_SIZE_U64:
            env->spr[spr] = val.u64;
            break;

        default:
            /* Don't handle this size yet */
            abort();
        }
    }
}

static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;

    case KVM_REG_SIZE_U64:
        val.u64 = env->spr[spr];
        break;

    default:
        /* Don't handle this size yet */
        abort();
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}

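/* Transfer floating point, VSX and Altivec state to/from KVM.  Each FPR
 * is passed as the first doubleword of a 128-bit VSR slot; when the CPU
 * has VSX, the second doubleword is transferred as well. */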
static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            vsr[0] = float64_val(env->fpr[i]);
            vsr[1] = env->vsr[i];
            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)&env->avr[i];
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get %s%d from KVM: %s\n",
                        vsx ? "VSR" : "FPR", i, strerror(errno));
                return ret;
            } else {
                env->fpr[i] = vsr[0];
                if (vsx) {
                    env->vsr[i] = vsr[1];
                }
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)&env->avr[i];
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get VR%d from KVM: %s\n",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

#if defined(TARGET_PPC64)
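/* PAPR guests register a Virtual Processor Area (VPA), an SLB shadow
 * buffer and a dispatch trace log (DTL) with the hypervisor; the three
 * registrations are synchronized with KVM through ONE_REG below. */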
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&env->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    return 0;
}

static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    /* SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA */
    assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));

    if (env->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
                strerror(errno));
        return ret;
    }

    if (!env->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    return 0;
}
#endif /* TARGET_PPC64 */

int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.cr = 0;
    for (i = 0; i < 8; i++) {
        regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    kvm_put_fp(cs);

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        struct kvm_sregs sregs;

        sregs.pvr = env->spr[SPR_PVR];

        sregs.u.s.sdr1 = env->spr[SPR_SDR1];

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
            sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
            if (env->slb[i].esid & SLB_ESID_V) {
                sregs.u.s.ppc64.slb[i].slbe |= i;
            }
            sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            sregs.u.s.ppc32.sr[i] = env->sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            /* Beware. We have to swap upper and lower bits here */
            sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
                | env->DBAT[1][i];
            sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
                | env->IBAT[1][i];
        }

        ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (ret) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /* We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate. */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (msr_ts) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_put_vpa(cs) < 0) {
                DPRINTF("Warning: Unable to set VPA information to KVM\n");
            }
        }

        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
#endif /* TARGET_PPC64 */
    }

    return ret;
}

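/* Recompute the effective BookE exception vector for @vector: the
 * IVORn offset is applied on top of the IVPR base. */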
static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
{
    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}

int kvm_arch_get_registers(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    cpu_write_xer(env, regs.xer);
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    kvm_get_fp(cs);

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
            kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
                kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
                kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
                kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (!env->external_htab) {
            ppc_store_sdr1(env, sregs.u.s.sdr1);
        }

        /* Sync SLB */
#ifdef TARGET_PPC64
        /*
         * The packed SLB array we get from KVM_GET_SREGS only contains
         * information about valid entries. So we flush our internal
         * copy to get rid of stale ones, then put all valid SLB entries
         * back in.
         */
        memset(env->slb, 0, sizeof(env->slb));
        for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
            target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
            target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
            /*
             * Only restore valid entries
             */
            if (rb & SLB_ESID_V) {
                ppc_store_slb(env, rb, rs);
            }
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    if (cap_hior) {
        kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /* We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate. */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_get_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (msr_ts) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_get_vpa(cs) < 0) {
                DPRINTF("Warning: Unable to get VPA information from KVM\n");
            }
        }

        kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
#endif
    }

    return 0;
}

int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);

    return 0;
}

#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int r;
    unsigned irq;

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        DPRINTF("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int kvmppc_handle_halt(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
    }

    return 0;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

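/* Software breakpoints are planted by overwriting the guest instruction
 * with the KVM-provided debug_inst_opcode; the original instruction is
 * saved in bp->saved_insn so it can be restored on removal. */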
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /* Mixed endian case is not handled */
    uint32_t sc = debug_inst_opcode;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t sc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
        sc != debug_inst_opcode ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}

static int find_hw_breakpoint(target_ulong addr, int type)
{
    int n;

    assert((nb_hw_breakpoint + nb_hw_watchpoint)
           <= ARRAY_SIZE(hw_debug_points));

    for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
        if (hw_debug_points[n].addr == addr &&
            hw_debug_points[n].type == type) {
            return n;
        }
    }

    return -1;
}

static int find_hw_watchpoint(target_ulong addr, int *flag)
{
    int n;

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
    if (n >= 0) {
        *flag = BP_MEM_ACCESS;
        return n;
    }

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
    if (n >= 0) {
        *flag = BP_MEM_WRITE;
        return n;
    }

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
    if (n >= 0) {
        *flag = BP_MEM_READ;
        return n;
    }

    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
        return -ENOBUFS;
    }

    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;

    switch (type) {
    case GDB_BREAKPOINT_HW:
        if (nb_hw_breakpoint >= max_hw_breakpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_breakpoint++;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        if (nb_hw_watchpoint >= max_hw_watchpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_watchpoint++;
        break;

    default:
        return -ENOSYS;
    }

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, type);
    if (n < 0) {
        return -ENOENT;
    }

    switch (type) {
    case GDB_BREAKPOINT_HW:
        nb_hw_breakpoint--;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        nb_hw_watchpoint--;
        break;

    default:
        return -ENOSYS;
    }
    hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = nb_hw_watchpoint = 0;
}

Bharat Bhushan8a0548f2014-07-14 14:45:38 +05301460void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
1461{
Bharat Bhushan88365d12014-07-14 14:45:37 +05301462 int n;
1463
Bharat Bhushan8a0548f2014-07-14 14:45:38 +05301464 /* Software Breakpoint updates */
1465 if (kvm_sw_breakpoints_active(cs)) {
1466 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1467 }
Bharat Bhushan88365d12014-07-14 14:45:37 +05301468
1469 assert((nb_hw_breakpoint + nb_hw_watchpoint)
1470 <= ARRAY_SIZE(hw_debug_points));
1471 assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
1472
1473 if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
1474 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1475 memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
1476 for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
1477 switch (hw_debug_points[n].type) {
1478 case GDB_BREAKPOINT_HW:
1479 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
1480 break;
1481 case GDB_WATCHPOINT_WRITE:
1482 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
1483 break;
1484 case GDB_WATCHPOINT_READ:
1485 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
1486 break;
1487 case GDB_WATCHPOINT_ACCESS:
1488 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
1489 KVMPPC_DEBUG_WATCH_READ;
1490 break;
1491 default:
1492 cpu_abort(cs, "Unsupported breakpoint type\n");
1493 }
1494 dbg->arch.bp[n].addr = hw_debug_points[n].addr;
1495 }
1496 }
Bharat Bhushan8a0548f2014-07-14 14:45:38 +05301497}
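
/*
 * Illustrative sketch, not part of this file: the generic KVM layer is
 * assumed to build on the hook above roughly as follows when (re)arming
 * guest debugging, with KVM_SET_GUEST_DEBUG doing the actual work:
 *
 *     struct kvm_guest_debug dbg = { .control = 0 };
 *     kvm_arch_update_guest_debug(cs, &dbg);
 *     kvm_vcpu_ioctl(cs, KVM_SET_GUEST_DEBUG, &dbg);
 */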
1498
1499static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
1500{
1501 CPUState *cs = CPU(cpu);
1502 CPUPPCState *env = &cpu->env;
1503 struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1504 int handle = 0;
Bharat Bhushan88365d12014-07-14 14:45:37 +05301505 int n;
1506 int flag = 0;
Bharat Bhushan8a0548f2014-07-14 14:45:38 +05301507
Bharat Bhushan88365d12014-07-14 14:45:37 +05301508 if (cs->singlestep_enabled) {
1509 handle = 1;
1510 } else if (arch_info->status) {
1511 if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
1512 if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
1513 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
1514 if (n >= 0) {
1515 handle = 1;
1516 }
1517 } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
1518 KVMPPC_DEBUG_WATCH_WRITE)) {
1519 n = find_hw_watchpoint(arch_info->address, &flag);
1520 if (n >= 0) {
1521 handle = 1;
1522 cs->watchpoint_hit = &hw_watchpoint;
1523 hw_watchpoint.vaddr = hw_debug_points[n].addr;
1524 hw_watchpoint.flags = flag;
1525 }
1526 }
1527 }
1528 } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
Bharat Bhushan8a0548f2014-07-14 14:45:38 +05301529 handle = 1;
1530 } else {
        /* QEMU is not able to handle this debug exception, so inject a
         * program exception into the guest;
         * yes, a program exception, NOT a debug exception!
         * While QEMU is using the debug resources, the debug exception
         * must always be enabled. To achieve this we set MSR_DE and
         * also set MSRP_DEP so the guest cannot change MSR_DE.
         * When emulating debug resources for the guest, we want the
         * guest to control MSR_DE (enabling/disabling the debug
         * interrupt as needed).
         * Supporting both configurations at once is NOT possible, so
         * debug resources cannot be shared between QEMU and the guest
         * on the BookE architecture.
         * In the current design QEMU gets priority over the guest: if
         * QEMU is using the debug resources, the guest cannot use them.
         * For software breakpoints QEMU uses a privileged instruction,
         * so there is no way we get here because the guest raised a
         * debug exception; the only possibility is that the guest
         * executed a privileged / illegal instruction, which is why we
         * inject a program interrupt.
         */
1551
1552 cpu_synchronize_state(cs);
        /* env->nip is the PC, so advance it by 4 before calling
         * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
         */
1556 env->nip += 4;
1557 cs->exception_index = POWERPC_EXCP_PROGRAM;
1558 env->error_code = POWERPC_EXCP_INVAL;
1559 ppc_cpu_do_interrupt(cs);
1560 }
1561
1562 return handle;
1563}
1564
Andreas Färber20d695a2012-10-31 06:57:49 +01001565int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
aurel32d76d1652008-12-16 10:43:58 +00001566{
Andreas Färber20d695a2012-10-31 06:57:49 +01001567 PowerPCCPU *cpu = POWERPC_CPU(cs);
1568 CPUPPCState *env = &cpu->env;
Jan Kiszkabb4ea392011-03-15 12:26:28 +01001569 int ret;
aurel32d76d1652008-12-16 10:43:58 +00001570
1571 switch (run->exit_reason) {
1572 case KVM_EXIT_DCR:
1573 if (run->dcr.is_write) {
Peter Maydellda56ff92013-07-29 13:16:38 +01001574 DPRINTF("handle dcr write\n");
aurel32d76d1652008-12-16 10:43:58 +00001575 ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1576 } else {
Peter Maydellda56ff92013-07-29 13:16:38 +01001577 DPRINTF("handle dcr read\n");
aurel32d76d1652008-12-16 10:43:58 +00001578 ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1579 }
1580 break;
1581 case KVM_EXIT_HLT:
Peter Maydellda56ff92013-07-29 13:16:38 +01001582 DPRINTF("handle halt\n");
Andreas Färber259186a2013-01-17 18:51:17 +01001583 ret = kvmppc_handle_halt(cpu);
aurel32d76d1652008-12-16 10:43:58 +00001584 break;
David Gibsonc6304a42013-03-13 15:53:27 +00001585#if defined(TARGET_PPC64)
Alexander Graff61b4be2011-08-09 17:57:37 +02001586 case KVM_EXIT_PAPR_HCALL:
Peter Maydellda56ff92013-07-29 13:16:38 +01001587 DPRINTF("handle PAPR hypercall\n");
Andreas Färber20d695a2012-10-31 06:57:49 +01001588 run->papr_hcall.ret = spapr_hypercall(cpu,
Andreas Färberaa100fa2012-05-03 06:13:14 +02001589 run->papr_hcall.nr,
Alexander Graff61b4be2011-08-09 17:57:37 +02001590 run->papr_hcall.args);
David Gibson78e8fde2012-08-06 18:44:45 +00001591 ret = 0;
Alexander Graff61b4be2011-08-09 17:57:37 +02001592 break;
1593#endif
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001594 case KVM_EXIT_EPR:
Peter Maydellda56ff92013-07-29 13:16:38 +01001595 DPRINTF("handle epr\n");
Alexander Graf933b19e2014-02-14 09:15:21 +01001596 run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001597 ret = 0;
1598 break;
Bharat Bhushan31f2cb82013-02-24 18:16:21 +00001599 case KVM_EXIT_WATCHDOG:
Peter Maydellda56ff92013-07-29 13:16:38 +01001600 DPRINTF("handle watchdog expiry\n");
Bharat Bhushan31f2cb82013-02-24 18:16:21 +00001601 watchdog_perform_action();
1602 ret = 0;
1603 break;
1604
Bharat Bhushan8a0548f2014-07-14 14:45:38 +05301605 case KVM_EXIT_DEBUG:
1606 DPRINTF("handle debug exception\n");
1607 if (kvm_handle_debug(cpu, run)) {
1608 ret = EXCP_DEBUG;
1609 break;
1610 }
1611 /* re-enter, this exception was guest-internal */
1612 ret = 0;
1613 break;
1614
Jan Kiszka73aaec42011-01-21 21:48:06 +01001615 default:
1616 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
1617 ret = -1;
1618 break;
aurel32d76d1652008-12-16 10:43:58 +00001619 }
1620
1621 return ret;
1622}
1623
Bharat Bhushan31f2cb82013-02-24 18:16:21 +00001624int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
1625{
1626 CPUState *cs = CPU(cpu);
1627 uint32_t bits = tsr_bits;
1628 struct kvm_one_reg reg = {
1629 .id = KVM_REG_PPC_OR_TSR,
1630 .addr = (uintptr_t) &bits,
1631 };
1632
1633 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1634}
1635
1636int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
1637{
1638
1639 CPUState *cs = CPU(cpu);
1640 uint32_t bits = tsr_bits;
1641 struct kvm_one_reg reg = {
1642 .id = KVM_REG_PPC_CLEAR_TSR,
1643 .addr = (uintptr_t) &bits,
1644 };
1645
1646 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1647}
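
/*
 * Usage sketch (hypothetical caller): a BookE watchdog emulation could
 * raise and later acknowledge the watchdog status bits with the two
 * helpers above; TSR_ENW / TSR_WIS are assumed to be the usual BookE
 * bit names:
 *
 *     kvmppc_or_tsr_bits(cpu, TSR_ENW | TSR_WIS);
 *     ...
 *     kvmppc_clear_tsr_bits(cpu, TSR_ENW | TSR_WIS);
 */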
1648
1649int kvmppc_set_tcr(PowerPCCPU *cpu)
1650{
1651 CPUState *cs = CPU(cpu);
1652 CPUPPCState *env = &cpu->env;
1653 uint32_t tcr = env->spr[SPR_BOOKE_TCR];
1654
1655 struct kvm_one_reg reg = {
1656 .id = KVM_REG_PPC_TCR,
1657 .addr = (uintptr_t) &tcr,
1658 };
1659
1660 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1661}
1662
1663int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
1664{
1665 CPUState *cs = CPU(cpu);
Bharat Bhushan31f2cb82013-02-24 18:16:21 +00001666 int ret;
1667
1668 if (!kvm_enabled()) {
1669 return -1;
1670 }
1671
1672 if (!cap_ppc_watchdog) {
        printf("warning: KVM does not support watchdog\n");
1674 return -1;
1675 }
1676
Cornelia Huck48add812014-04-09 17:21:57 +02001677 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
Bharat Bhushan31f2cb82013-02-24 18:16:21 +00001678 if (ret < 0) {
1679 fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
1680 __func__, strerror(-ret));
1681 return ret;
1682 }
1683
1684 return ret;
1685}
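
/*
 * A minimal caller sketch: enable the in-kernel watchdog once at vcpu
 * setup time, then mirror the guest's TCR into KVM with the helper
 * above whenever the guest rewrites it:
 *
 *     if (kvmppc_booke_watchdog_enable(cpu) == 0) {
 *         kvmppc_set_tcr(cpu);
 *     }
 */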
1686
Alexander Grafdc333cd2010-02-09 17:37:05 +01001687static int read_cpuinfo(const char *field, char *value, int len)
1688{
1689 FILE *f;
1690 int ret = -1;
1691 int field_len = strlen(field);
1692 char line[512];
1693
1694 f = fopen("/proc/cpuinfo", "r");
1695 if (!f) {
1696 return -1;
1697 }
1698
1699 do {
Nikunj A Dadhaniaef951442014-07-09 16:08:37 +05301700 if (!fgets(line, sizeof(line), f)) {
Alexander Grafdc333cd2010-02-09 17:37:05 +01001701 break;
1702 }
1703 if (!strncmp(line, field, field_len)) {
Jim Meyeringae215062012-10-04 13:09:52 +02001704 pstrcpy(value, len, line);
Alexander Grafdc333cd2010-02-09 17:37:05 +01001705 ret = 0;
1706 break;
1707 }
    } while (*line);
1709
1710 fclose(f);
1711
1712 return ret;
1713}
1714
1715uint32_t kvmppc_get_tbfreq(void)
1716{
1717 char line[512];
1718 char *ns;
1719 uint32_t retval = get_ticks_per_sec();
1720
1721 if (read_cpuinfo("timebase", line, sizeof(line))) {
1722 return retval;
1723 }
1724
1725 if (!(ns = strchr(line, ':'))) {
1726 return retval;
1727 }
1728
1729 ns++;
1730
1731 retval = atoi(ns);
1732 return retval;
1733}
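
/*
 * For reference, the /proc/cpuinfo line parsed above typically looks
 * like this on a Power host (the value is machine-specific):
 *
 *     timebase        : 512000000
 */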
Gleb Natapov4513d922010-05-10 11:21:34 +03001734
Nikunj A Dadhaniaef951442014-07-09 16:08:37 +05301735bool kvmppc_get_host_serial(char **value)
1736{
1737 return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1738 NULL);
1739}
1740
1741bool kvmppc_get_host_model(char **value)
1742{
1743 return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1744}
1745
Alexander Grafeadaada2011-07-21 02:29:15 +02001746/* Try to find a device tree node for a CPU with clock-frequency property */
1747static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1748{
1749 struct dirent *dirp;
1750 DIR *dp;
1751
1752 if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1753 printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1754 return -1;
1755 }
1756
1757 buf[0] = '\0';
1758 while ((dirp = readdir(dp)) != NULL) {
1759 FILE *f;
1760 snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1761 dirp->d_name);
1762 f = fopen(buf, "r");
1763 if (f) {
1764 snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1765 fclose(f);
1766 break;
1767 }
1768 buf[0] = '\0';
1769 }
1770 closedir(dp);
1771 if (buf[0] == '\0') {
1772 printf("Unknown host!\n");
1773 return -1;
1774 }
1775
1776 return 0;
1777}
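
/*
 * On success, buf holds a CPU node path; a typical (host-specific)
 * example would be:
 *
 *     /proc/device-tree/cpus/PowerPC,POWER7@0
 */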
1778
David Gibson9bc884b2011-10-10 18:31:00 +00001779/* Read a CPU node property from the host device tree that's a single
1780 * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
1781 * (can't find or open the property, or doesn't understand the
1782 * format) */
1783static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
Alexander Grafeadaada2011-07-21 02:29:15 +02001784{
Chen Gangcc64b1a2014-10-15 21:48:07 +08001785 char buf[PATH_MAX], *tmp;
David Gibson9bc884b2011-10-10 18:31:00 +00001786 union {
1787 uint32_t v32;
1788 uint64_t v64;
1789 } u;
Alexander Grafeadaada2011-07-21 02:29:15 +02001790 FILE *f;
1791 int len;
1792
1793 if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
David Gibson9bc884b2011-10-10 18:31:00 +00001794 return -1;
Alexander Grafeadaada2011-07-21 02:29:15 +02001795 }
1796
Chen Gangcc64b1a2014-10-15 21:48:07 +08001797 tmp = g_strdup_printf("%s/%s", buf, propname);
Alexander Grafeadaada2011-07-21 02:29:15 +02001798
Chen Gangcc64b1a2014-10-15 21:48:07 +08001799 f = fopen(tmp, "rb");
1800 g_free(tmp);
Alexander Grafeadaada2011-07-21 02:29:15 +02001801 if (!f) {
1802 return -1;
1803 }
1804
David Gibson9bc884b2011-10-10 18:31:00 +00001805 len = fread(&u, 1, sizeof(u), f);
Alexander Grafeadaada2011-07-21 02:29:15 +02001806 fclose(f);
1807 switch (len) {
David Gibson9bc884b2011-10-10 18:31:00 +00001808 case 4:
1809 /* property is a 32-bit quantity */
1810 return be32_to_cpu(u.v32);
1811 case 8:
1812 return be64_to_cpu(u.v64);
Alexander Grafeadaada2011-07-21 02:29:15 +02001813 }
1814
1815 return 0;
1816}
1817
David Gibson9bc884b2011-10-10 18:31:00 +00001818uint64_t kvmppc_get_clockfreq(void)
1819{
1820 return kvmppc_read_int_cpu_dt("clock-frequency");
1821}
1822
David Gibson66593942011-10-10 18:31:01 +00001823uint32_t kvmppc_get_vmx(void)
1824{
1825 return kvmppc_read_int_cpu_dt("ibm,vmx");
1826}
1827
1828uint32_t kvmppc_get_dfp(void)
1829{
1830 return kvmppc_read_int_cpu_dt("ibm,dfp");
1831}
1832
Stuart Yoder1a61a9a2013-01-03 12:37:02 +00001833static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
1835 PowerPCCPU *cpu = ppc_env_get_cpu(env);
1836 CPUState *cs = CPU(cpu);
Alexander Graf45024f02010-08-03 15:22:42 +02001837
Alexander Graf6fd33a72014-07-14 19:17:35 +02001838 if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
Stuart Yoder1a61a9a2013-01-03 12:37:02 +00001839 !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
1840 return 0;
1841 }
Alexander Graf45024f02010-08-03 15:22:42 +02001842
Stuart Yoder1a61a9a2013-01-03 12:37:02 +00001843 return 1;
1844}
1845
1846int kvmppc_get_hasidle(CPUPPCState *env)
1847{
1848 struct kvm_ppc_pvinfo pvinfo;
1849
1850 if (!kvmppc_get_pvinfo(env, &pvinfo) &&
1851 (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
1852 return 1;
1853 }
1854
1855 return 0;
1856}
1857
1858int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
1859{
    uint32_t *hc = (uint32_t *)buf;
1861 struct kvm_ppc_pvinfo pvinfo;
1862
1863 if (!kvmppc_get_pvinfo(env, &pvinfo)) {
1864 memcpy(buf, pvinfo.hcall, buf_len);
Alexander Graf45024f02010-08-03 15:22:42 +02001865 return 0;
1866 }
Alexander Graf45024f02010-08-03 15:22:42 +02001867
1868 /*
Alexander Grafd13fc322014-06-11 12:19:03 +02001869 * Fallback to always fail hypercalls regardless of endianness:
Alexander Graf45024f02010-08-03 15:22:42 +02001870 *
Alexander Grafd13fc322014-06-11 12:19:03 +02001871 * tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
Alexander Graf45024f02010-08-03 15:22:42 +02001872 * li r3, -1
Alexander Grafd13fc322014-06-11 12:19:03 +02001873 * b .+8 (becomes nop in wrong endian)
1874 * bswap32(li r3, -1)
Alexander Graf45024f02010-08-03 15:22:42 +02001875 */
1876
Alexander Grafd13fc322014-06-11 12:19:03 +02001877 hc[0] = cpu_to_be32(0x08000048);
1878 hc[1] = cpu_to_be32(0x3860ffff);
1879 hc[2] = cpu_to_be32(0x48000008);
1880 hc[3] = cpu_to_be32(bswap32(0x3860ffff));
Alexander Graf45024f02010-08-03 15:22:42 +02001881
1882 return 0;
1883}
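
/*
 * Usage sketch (hypothetical caller): platform code is expected to copy
 * the returned sequence into the guest device tree, along the lines of:
 *
 *     uint8_t hypercall[16];
 *     kvmppc_get_hypercall(env, hypercall, sizeof(hypercall));
 *     qemu_fdt_setprop(fdt, "/hypervisor", "hcall-instructions",
 *                      hypercall, sizeof(hypercall));
 */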
1884
Andreas Färber1bc22652012-10-31 06:06:49 +01001885void kvmppc_set_papr(PowerPCCPU *cpu)
Alexander Graff61b4be2011-08-09 17:57:37 +02001886{
Andreas Färber1bc22652012-10-31 06:06:49 +01001887 CPUState *cs = CPU(cpu);
Alexander Graff61b4be2011-08-09 17:57:37 +02001888 int ret;
1889
Cornelia Huck48add812014-04-09 17:21:57 +02001890 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
Alexander Graff61b4be2011-08-09 17:57:37 +02001891 if (ret) {
Andreas Färbera47dddd2013-09-03 17:38:47 +02001892 cpu_abort(cs, "This KVM version does not support PAPR\n");
Alexander Graff61b4be2011-08-09 17:57:37 +02001893 }
David Gibson9b00ea42013-04-07 19:08:22 +00001894
    /* Update the capability flag so that we sync the right information
     * with KVM */
1897 cap_papr = 1;
Alexander Graff61b4be2011-08-09 17:57:37 +02001898}
1899
Alexey Kardashevskiy6db5bb02014-05-23 12:26:58 +10001900int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
1901{
1902 return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &cpu_version);
1903}
1904
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001905void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
1906{
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001907 CPUState *cs = CPU(cpu);
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001908 int ret;
1909
Cornelia Huck48add812014-04-09 17:21:57 +02001910 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001911 if (ret && mpic_proxy) {
Andreas Färbera47dddd2013-09-03 17:38:47 +02001912 cpu_abort(cs, "This KVM version does not support EPR\n");
Alexander Graf5b95b8b2013-01-17 11:54:38 +01001913 }
1914}
1915
David Gibsone97c3632011-09-29 21:39:10 +00001916int kvmppc_smt_threads(void)
1917{
1918 return cap_ppc_smt ? cap_ppc_smt : 1;
1919}
1920
David Gibson7f763a52012-09-12 16:57:12 +00001921#ifdef TARGET_PPC64
Alexey Kardashevskiy658fa662014-07-11 01:03:41 +10001922off_t kvmppc_alloc_rma(void **rma)
David Gibson354ac202011-09-29 21:39:11 +00001923{
David Gibson354ac202011-09-29 21:39:11 +00001924 off_t size;
1925 int fd;
1926 struct kvm_allocate_rma ret;
David Gibson354ac202011-09-29 21:39:11 +00001927
    /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported;
     * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
     * not necessary on this hardware;
     * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this
     * hardware.
     *
     * FIXME: We should allow the user to force contiguous RMA
     * allocation in the cap_ppc_rma == 1 case.
     */
1936 if (cap_ppc_rma < 2) {
1937 return 0;
1938 }
1939
1940 fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
1941 if (fd < 0) {
1942 fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
1943 strerror(errno));
1944 return -1;
1945 }
1946
1947 size = MIN(ret.rma_size, 256ul << 20);
1948
Alexey Kardashevskiy658fa662014-07-11 01:03:41 +10001949 *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
1950 if (*rma == MAP_FAILED) {
David Gibson354ac202011-09-29 21:39:11 +00001951 fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
1952 return -1;
    }
1954
David Gibson354ac202011-09-29 21:39:11 +00001955 return size;
1956}
1957
David Gibson7f763a52012-09-12 16:57:12 +00001958uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
1959{
David Gibsonf36951c2013-04-07 19:08:18 +00001960 struct kvm_ppc_smmu_info info;
1961 long rampagesize, best_page_shift;
1962 int i;
1963
David Gibson7f763a52012-09-12 16:57:12 +00001964 if (cap_ppc_rma >= 2) {
1965 return current_size;
1966 }
David Gibsonf36951c2013-04-07 19:08:18 +00001967
1968 /* Find the largest hardware supported page size that's less than
1969 * or equal to the (logical) backing page size of guest RAM */
Andreas Färber182735e2013-05-29 22:29:20 +02001970 kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
David Gibsonf36951c2013-04-07 19:08:18 +00001971 rampagesize = getrampagesize();
1972 best_page_shift = 0;
1973
1974 for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
1975 struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
1976
1977 if (!sps->page_shift) {
1978 continue;
1979 }
1980
1981 if ((sps->page_shift > best_page_shift)
1982 && ((1UL << sps->page_shift) <= rampagesize)) {
1983 best_page_shift = sps->page_shift;
1984 }
1985 }
1986
David Gibson7f763a52012-09-12 16:57:12 +00001987 return MIN(current_size,
David Gibsonf36951c2013-04-07 19:08:18 +00001988 1ULL << (best_page_shift + hash_shift - 7));
David Gibson7f763a52012-09-12 16:57:12 +00001989}
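
/*
 * Worked example of the clamp above: with 4KiB backing pages
 * (best_page_shift = 12) and a 28-bit hash table (hash_shift = 28),
 * the RMA is limited to 1ULL << (12 + 28 - 7) = 8GiB; with 16MiB huge
 * pages (shift 24) the limit becomes 1ULL << 45 and the current size
 * is effectively unconstrained.
 */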
1990#endif
1991
Alexey Kardashevskiyda953242014-05-27 15:36:30 +10001992bool kvmppc_spapr_use_multitce(void)
1993{
1994 return cap_spapr_multitce;
1995}
1996
Alexey Kardashevskiy9bb62a02014-06-10 15:39:21 +10001997void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
1998 bool vfio_accel)
David Gibson0f5cb292011-09-29 21:39:12 +00001999{
2000 struct kvm_create_spapr_tce args = {
2001 .liobn = liobn,
2002 .window_size = window_size,
2003 };
2004 long len;
2005 int fd;
2006 void *table;
2007
    /* Must set fd to -1 so we don't try to munmap() when called to
     * destroy the table, which the upper layers -will- do
     */
2011 *pfd = -1;
Alexey Kardashevskiy9bb62a02014-06-10 15:39:21 +10002012 if (!cap_spapr_tce || (vfio_accel && !cap_spapr_vfio)) {
David Gibson0f5cb292011-09-29 21:39:12 +00002013 return NULL;
2014 }
2015
2016 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
2017 if (fd < 0) {
David Gibsonb5aec392012-02-27 17:18:07 +00002018 fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2019 liobn);
David Gibson0f5cb292011-09-29 21:39:12 +00002020 return NULL;
2021 }
2022
Anthony Liguoria83000f2013-07-18 14:32:58 -05002023 len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t);
David Gibson0f5cb292011-09-29 21:39:12 +00002024 /* FIXME: round this up to page size */
2025
David Gibson74b41e52011-10-27 15:56:31 +00002026 table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
David Gibson0f5cb292011-09-29 21:39:12 +00002027 if (table == MAP_FAILED) {
David Gibsonb5aec392012-02-27 17:18:07 +00002028 fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2029 liobn);
David Gibson0f5cb292011-09-29 21:39:12 +00002030 close(fd);
2031 return NULL;
2032 }
2033
2034 *pfd = fd;
2035 return table;
2036}
2037
Alexey Kardashevskiy523e7b82014-05-27 15:36:35 +10002038int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
David Gibson0f5cb292011-09-29 21:39:12 +00002039{
2040 long len;
2041
2042 if (fd < 0) {
2043 return -1;
2044 }
2045
Alexey Kardashevskiy523e7b82014-05-27 15:36:35 +10002046 len = nb_table * sizeof(uint64_t);
David Gibson0f5cb292011-09-29 21:39:12 +00002047 if ((munmap(table, len) < 0) ||
2048 (close(fd) < 0)) {
David Gibsonb5aec392012-02-27 17:18:07 +00002049 fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2050 strerror(errno));
David Gibson0f5cb292011-09-29 21:39:12 +00002051 /* Leak the table */
2052 }
2053
2054 return 0;
2055}
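
/*
 * Lifecycle sketch (hypothetical caller): the create/remove helpers are
 * meant to be used as a pair, e.g.:
 *
 *     int fd;
 *     void *tbl = kvmppc_create_spapr_tce(liobn, window_size, &fd, false);
 *     ...
 *     kvmppc_remove_spapr_tce(tbl, fd, window_size / SPAPR_TCE_PAGE_SIZE);
 */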
2056
David Gibson7f763a52012-09-12 16:57:12 +00002057int kvmppc_reset_htab(int shift_hint)
2058{
2059 uint32_t shift = shift_hint;
2060
David Gibsonace9a2c2012-09-19 21:08:42 +00002061 if (!kvm_enabled()) {
2062 /* Full emulation, tell caller to allocate htab itself */
2063 return 0;
2064 }
2065 if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
David Gibson7f763a52012-09-12 16:57:12 +00002066 int ret;
2067 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
David Gibsonace9a2c2012-09-19 21:08:42 +00002068 if (ret == -ENOTTY) {
2069 /* At least some versions of PR KVM advertise the
2070 * capability, but don't implement the ioctl(). Oops.
2071 * Return 0 so that we allocate the htab in qemu, as is
2072 * correct for PR. */
2073 return 0;
2074 } else if (ret < 0) {
David Gibson7f763a52012-09-12 16:57:12 +00002075 return ret;
2076 }
2077 return shift;
2078 }
2079
    /* We have a kernel that predates the htab reset calls.  For PR
     * KVM, we need to allocate the htab ourselves; an HV KVM of this
     * era has already allocated a fixed 16MB hash table.  Kernels of
     * this era have the GET_PVINFO capability only on PR, so we use
     * this hack to determine the right answer */
2086 if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
2087 /* PR - tell caller to allocate htab */
2088 return 0;
2089 } else {
2090 /* HV - assume 16MB kernel allocated htab */
2091 return 24;
2092 }
David Gibson7f763a52012-09-12 16:57:12 +00002093}
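
/*
 * Caller sketch: the return convention above is 0 => QEMU must allocate
 * the HTAB itself, > 0 => the kernel allocated it and returned its
 * actual size shift:
 *
 *     shift = kvmppc_reset_htab(requested_shift);
 *     if (shift > 0) {
 *         htab_size = 1ULL << shift;    (kernel-managed HTAB)
 *     }
 */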
2094
David Gibsona1e98582011-10-12 22:40:32 +00002095static inline uint32_t mfpvr(void)
2096{
2097 uint32_t pvr;
2098
2099 asm ("mfpvr %0"
2100 : "=r"(pvr));
2101 return pvr;
2102}
2103
David Gibsona7342582011-10-17 18:15:41 +00002104static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2105{
2106 if (on) {
2107 *word |= flags;
2108 } else {
2109 *word &= ~flags;
2110 }
2111}
2112
Andreas Färber2985b862013-01-06 08:31:30 +00002113static void kvmppc_host_cpu_initfn(Object *obj)
David Gibsona1e98582011-10-12 22:40:32 +00002114{
Andreas Färber2985b862013-01-06 08:31:30 +00002115 assert(kvm_enabled());
2116}
2117
2118static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
2119{
2120 PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
David Gibsona7342582011-10-17 18:15:41 +00002121 uint32_t vmx = kvmppc_get_vmx();
2122 uint32_t dfp = kvmppc_get_dfp();
David Gibson0cbad812013-04-07 19:08:19 +00002123 uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
2124 uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
David Gibsona1e98582011-10-12 22:40:32 +00002125
Andreas Färbercfe34f42013-02-17 23:16:41 +00002126 /* Now fix up the class with information we can query from the host */
Alexey Kardashevskiy3bc9ccc2013-09-27 18:05:03 +10002127 pcc->pvr = mfpvr();
David Gibsona7342582011-10-17 18:15:41 +00002128
Alexander Graf70bca532011-10-24 20:43:22 +02002129 if (vmx != -1) {
2130 /* Only override when we know what the host supports */
Andreas Färbercfe34f42013-02-17 23:16:41 +00002131 alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
2132 alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
Alexander Graf70bca532011-10-24 20:43:22 +02002133 }
2134 if (dfp != -1) {
2135 /* Only override when we know what the host supports */
Andreas Färbercfe34f42013-02-17 23:16:41 +00002136 alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
Alexander Graf70bca532011-10-24 20:43:22 +02002137 }
David Gibson0cbad812013-04-07 19:08:19 +00002138
2139 if (dcache_size != -1) {
2140 pcc->l1_dcache_size = dcache_size;
2141 }
2142
2143 if (icache_size != -1) {
2144 pcc->l1_icache_size = icache_size;
2145 }
David Gibsona1e98582011-10-12 22:40:32 +00002146}
2147
Stuart Yoder3b961122013-03-30 06:40:49 +00002148bool kvmppc_has_cap_epr(void)
2149{
2150 return cap_epr;
2151}
2152
Aneesh Kumar K.V7c43bca2014-02-20 18:52:24 +01002153bool kvmppc_has_cap_htab_fd(void)
2154{
2155 return cap_htab_fd;
2156}
2157
Alexander Graf87a91de2014-06-04 12:14:08 +02002158bool kvmppc_has_cap_fixup_hcalls(void)
2159{
2160 return cap_fixup_hcalls;
2161}
2162
Alexey Kardashevskiy5b79b1c2014-04-12 03:34:25 +10002163static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
2164{
2165 ObjectClass *oc = OBJECT_CLASS(pcc);
2166
2167 while (oc && !object_class_is_abstract(oc)) {
2168 oc = object_class_get_parent(oc);
2169 }
2170 assert(oc);
2171
2172 return POWERPC_CPU_CLASS(oc);
2173}
2174
Andreas Färber5ba45762013-02-23 11:22:12 +00002175static int kvm_ppc_register_host_cpu_type(void)
2176{
2177 TypeInfo type_info = {
2178 .name = TYPE_HOST_POWERPC_CPU,
2179 .instance_init = kvmppc_host_cpu_initfn,
2180 .class_init = kvmppc_host_cpu_class_init,
2181 };
2182 uint32_t host_pvr = mfpvr();
2183 PowerPCCPUClass *pvr_pcc;
Alexey Kardashevskiy5b79b1c2014-04-12 03:34:25 +10002184 DeviceClass *dc;
Andreas Färber5ba45762013-02-23 11:22:12 +00002185
2186 pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
2187 if (pvr_pcc == NULL) {
Alexey Kardashevskiy3bc9ccc2013-09-27 18:05:03 +10002188 pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
2189 }
2190 if (pvr_pcc == NULL) {
Andreas Färber5ba45762013-02-23 11:22:12 +00002191 return -1;
2192 }
2193 type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
2194 type_register(&type_info);
Alexey Kardashevskiy5b79b1c2014-04-12 03:34:25 +10002195
    /* Register a generic CPU class for this CPU's family */
2197 pvr_pcc = ppc_cpu_get_family_class(pvr_pcc);
2198 dc = DEVICE_CLASS(pvr_pcc);
2199 type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
2200 type_info.name = g_strdup_printf("%s-"TYPE_POWERPC_CPU, dc->desc);
2201 type_register(&type_info);
2202
Andreas Färber5ba45762013-02-23 11:22:12 +00002203 return 0;
2204}
2205
David Gibsonfeaa64c2013-09-26 16:18:35 +10002206int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2207{
2208 struct kvm_rtas_token_args args = {
2209 .token = token,
2210 };
2211
2212 if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2213 return -ENOENT;
2214 }
2215
2216 strncpy(args.name, function, sizeof(args.name));
2217
2218 return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2219}
David Gibson12b11432012-04-04 15:02:05 +10002220
Alexey Kardashevskiye68cb8b2013-07-18 14:33:03 -05002221int kvmppc_get_htab_fd(bool write)
2222{
2223 struct kvm_get_htab_fd s = {
2224 .flags = write ? KVM_GET_HTAB_WRITE : 0,
2225 .start_index = 0,
2226 };
2227
2228 if (!cap_htab_fd) {
2229 fprintf(stderr, "KVM version doesn't support saving the hash table\n");
2230 return -1;
2231 }
2232
2233 return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2234}
2235
2236int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2237{
Alex Blighbc72ad62013-08-21 16:03:08 +01002238 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
Alexey Kardashevskiye68cb8b2013-07-18 14:33:03 -05002239 uint8_t buf[bufsize];
2240 ssize_t rc;
2241
2242 do {
2243 rc = read(fd, buf, bufsize);
2244 if (rc < 0) {
2245 fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2246 strerror(errno));
2247 return rc;
2248 } else if (rc) {
            /* Kernel already returns data in BE format for the file */
2250 qemu_put_buffer(f, buf, rc);
2251 }
2252 } while ((rc != 0)
2253 && ((max_ns < 0)
Alex Blighbc72ad62013-08-21 16:03:08 +01002254 || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
Alexey Kardashevskiye68cb8b2013-07-18 14:33:03 -05002255
2256 return (rc == 0) ? 1 : 0;
2257}
2258
2259int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2260 uint16_t n_valid, uint16_t n_invalid)
2261{
2262 struct kvm_get_htab_header *buf;
2263 size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2264 ssize_t rc;
2265
2266 buf = alloca(chunksize);
2267 /* This is KVM on ppc, so this is all big-endian */
2268 buf->index = index;
2269 buf->n_valid = n_valid;
2270 buf->n_invalid = n_invalid;
2271
2272 qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2273
2274 rc = write(fd, buf, chunksize);
2275 if (rc < 0) {
2276 fprintf(stderr, "Error writing KVM hash table: %s\n",
2277 strerror(errno));
2278 return rc;
2279 }
2280 if (rc != chunksize) {
2281 /* We should never get a short write on a single chunk */
2282 fprintf(stderr, "Short write, restoring KVM hash table\n");
2283 return -1;
2284 }
2285 return 0;
2286}
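
/*
 * For reference, the stream consumed here matches what
 * kvmppc_save_htab() produced: a sequence of
 *
 *     struct kvm_get_htab_header { index; n_valid; n_invalid; }
 *
 * records, each followed by n_valid HPTEs of HASH_PTE_SIZE_64 bytes,
 * all in big-endian byte order.
 */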
2287
Andreas Färber20d695a2012-10-31 06:57:49 +01002288bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
Gleb Natapov4513d922010-05-10 11:21:34 +03002289{
2290 return true;
2291}
Jan Kiszkaa1b87fe2011-02-01 22:15:51 +01002292
Andreas Färber20d695a2012-10-31 06:57:49 +01002293int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
Jan Kiszkaa1b87fe2011-02-01 22:15:51 +01002294{
2295 return 1;
2296}
2297
2298int kvm_arch_on_sigbus(int code, void *addr)
2299{
2300 return 1;
2301}
Scott Wood82169662013-06-12 17:26:54 +10002302
2303void kvm_arch_init_irq_routing(KVMState *s)
2304{
2305}
Greg Kurzc65f9a02013-12-11 14:15:34 +01002306
Aneesh Kumar K.V7c43bca2014-02-20 18:52:24 +01002307struct kvm_get_htab_buf {
2308 struct kvm_get_htab_header header;
    /*
     * We require one extra entry for the read
     */
2312 target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
2313};
2314
2315uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
2316{
2317 int htab_fd;
2318 struct kvm_get_htab_fd ghf;
2319 struct kvm_get_htab_buf *hpte_buf;
2320
2321 ghf.flags = 0;
2322 ghf.start_index = pte_index;
2323 htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
2324 if (htab_fd < 0) {
2325 goto error_out;
2326 }
2327
2328 hpte_buf = g_malloc0(sizeof(*hpte_buf));
2329 /*
2330 * Read the hpte group
2331 */
2332 if (read(htab_fd, hpte_buf, sizeof(*hpte_buf)) < 0) {
2333 goto out_close;
2334 }
2335
2336 close(htab_fd);
2337 return (uint64_t)(uintptr_t) hpte_buf->hpte;
2338
2339out_close:
2340 g_free(hpte_buf);
2341 close(htab_fd);
2342error_out:
2343 return 0;
2344}
2345
2346void kvmppc_hash64_free_pteg(uint64_t token)
2347{
2348 struct kvm_get_htab_buf *htab_buf;
2349
2350 htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf,
2351 hpte);
2352 g_free(htab_buf);
2353 return;
2354}
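
/*
 * Pairing sketch: callers are expected to treat the returned token as
 * opaque and hand it back when done, e.g.:
 *
 *     uint64_t token = kvmppc_hash64_read_pteg(cpu, pte_index);
 *     if (token) {
 *         target_ulong *hptes = (target_ulong *)(uintptr_t)token;
 *         ...
 *         kvmppc_hash64_free_pteg(token);
 *     }
 */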
Aneesh Kumar K.Vc1385932014-02-20 18:52:38 +01002355
2356void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
2357 target_ulong pte0, target_ulong pte1)
2358{
2359 int htab_fd;
2360 struct kvm_get_htab_fd ghf;
2361 struct kvm_get_htab_buf hpte_buf;
2362
2363 ghf.flags = 0;
2364 ghf.start_index = 0; /* Ignored */
2365 htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
2366 if (htab_fd < 0) {
2367 goto error_out;
2368 }
2369
2370 hpte_buf.header.n_valid = 1;
2371 hpte_buf.header.n_invalid = 0;
2372 hpte_buf.header.index = pte_index;
2373 hpte_buf.hpte[0] = pte0;
2374 hpte_buf.hpte[1] = pte1;
2375 /*
2376 * Write the hpte entry.
2377 * CAUTION: write() has the warn_unused_result attribute. Hence we
2378 * need to check the return value, even though we do nothing.
2379 */
2380 if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
2381 goto out_close;
2382 }
2383
2384out_close:
2385 close(htab_fd);
2386 return;
2387
2388error_out:
2389 return;
2390}