blob: 4a7de42b35e9239b04cb65e1c4046d63e3979d1f [file] [log] [blame]
Blue Swirl6bada5e2012-04-29 14:42:35 +00001/*
2 * x86 SVM helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "cpu.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010021#include "exec/cpu-all.h"
Blue Swirl6bada5e2012-04-29 14:42:35 +000022#include "helper.h"
23
Blue Swirl92fc4b52012-04-29 20:35:48 +000024#if !defined(CONFIG_USER_ONLY)
Paolo Bonzini022c62c2012-12-17 18:19:49 +010025#include "exec/softmmu_exec.h"
Blue Swirl92fc4b52012-04-29 20:35:48 +000026#endif /* !defined(CONFIG_USER_ONLY) */
27
Blue Swirl6bada5e2012-04-29 14:42:35 +000028/* Secure Virtual Machine helpers */
29
30#if defined(CONFIG_USER_ONLY)
31
/* Stub: SVM is a system-level feature and is not available under
   user-mode emulation, so VMRUN does nothing here. */
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}
35
/* Stub: VMMCALL is a no-op under user-mode emulation. */
void helper_vmmcall(CPUX86State *env)
{
}
39
/* Stub: VMLOAD is a no-op under user-mode emulation. */
void helper_vmload(CPUX86State *env, int aflag)
{
}
43
/* Stub: VMSAVE is a no-op under user-mode emulation. */
void helper_vmsave(CPUX86State *env, int aflag)
{
}
47
/* Stub: STGI (set global interrupt flag) is a no-op under user-mode
   emulation. */
void helper_stgi(CPUX86State *env)
{
}
51
/* Stub: CLGI (clear global interrupt flag) is a no-op under user-mode
   emulation. */
void helper_clgi(CPUX86State *env)
{
}
55
/* Stub: SKINIT is a no-op under user-mode emulation. */
void helper_skinit(CPUX86State *env)
{
}
59
/* Stub: INVLPGA is a no-op under user-mode emulation. */
void helper_invlpga(CPUX86State *env, int aflag)
{
}
63
/* Stub: there is no guest/host world switch under user-mode emulation,
   so #VMEXIT does nothing. */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}
67
68void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
69{
70}
71
/* Stub: no intercepts can be armed under user-mode emulation. */
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}
76
/* Stub: no intercepts can be armed under user-mode emulation. */
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}
81
/* Stub: I/O permission-map intercepts do not exist under user-mode
   emulation. */
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
86#else
87
Avi Kivitya8170e52012-10-23 12:30:10 +020088static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
Blue Swirl6bada5e2012-04-29 14:42:35 +000089 const SegmentCache *sc)
90{
91 stw_phys(addr + offsetof(struct vmcb_seg, selector),
92 sc->selector);
93 stq_phys(addr + offsetof(struct vmcb_seg, base),
94 sc->base);
95 stl_phys(addr + offsetof(struct vmcb_seg, limit),
96 sc->limit);
97 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
98 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
99}
100
Avi Kivitya8170e52012-10-23 12:30:10 +0200101static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
Blue Swirl052e80d2012-04-29 15:51:49 +0000102 SegmentCache *sc)
Blue Swirl6bada5e2012-04-29 14:42:35 +0000103{
104 unsigned int flags;
105
106 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
107 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
108 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
109 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
110 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
111}
112
Avi Kivitya8170e52012-10-23 12:30:10 +0200113static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
Blue Swirl052e80d2012-04-29 15:51:49 +0000114 int seg_reg)
Blue Swirl6bada5e2012-04-29 14:42:35 +0000115{
116 SegmentCache sc1, *sc = &sc1;
117
Blue Swirl052e80d2012-04-29 15:51:49 +0000118 svm_load_seg(env, addr, sc);
Blue Swirl6bada5e2012-04-29 14:42:35 +0000119 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
120 sc->base, sc->limit, sc->flags);
121}
122
/*
 * VMRUN: switch the CPU from host to guest state.
 *
 * rAX holds the guest-physical address of the VMCB.  @aflag == 2 means
 * 64-bit address size (use all of rAX); otherwise only the low 32 bits
 * are used.  @next_eip_addend is the length of the VMRUN instruction,
 * used to compute the host resume RIP saved into the hsave page.
 *
 * Sequence: save host state to env->vm_hsave, then load guest state,
 * intercept bitmaps and control fields from the VMCB, and finally
 * inject a pending event from EVENTINJ if one is marked valid.  The
 * statement order below is significant (e.g. CRs before EFER, hsave
 * writes before guest loads) — do not reorder casually.
 */
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    /* VMRUN itself may be intercepted by an outer hypervisor. */
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* Host resumes at the instruction following VMRUN. */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    /* Load the guest descriptor-table and control-register state. */
    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* Virtual interrupt masking: guest's virtual TPR/IF take over;
           HF2_HIF records whether the host had interrupts enabled. */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    /* EFER after the CRs so the derived hflags are computed correctly. */
    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    /* Guest runs with the global interrupt flag set. */
    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            /* External interrupt injection: dispatched immediately. */
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            /* NMI injection: raised via the normal exception path. */
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            /* Hardware exception injection. */
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            /* Software interrupt injection (INTn semantics). */
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}
330
/* VMMCALL: check for an intercept by the hypervisor; if not
   intercepted, the instruction raises #UD as on real hardware. */
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
336
Blue Swirl052e80d2012-04-29 15:51:49 +0000337void helper_vmload(CPUX86State *env, int aflag)
Blue Swirl6bada5e2012-04-29 14:42:35 +0000338{
339 target_ulong addr;
340
Blue Swirl052e80d2012-04-29 15:51:49 +0000341 cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
Blue Swirl6bada5e2012-04-29 14:42:35 +0000342
343 if (aflag == 2) {
liguang4b34e3a2013-05-28 16:20:59 +0800344 addr = env->regs[R_EAX];
Blue Swirl6bada5e2012-04-29 14:42:35 +0000345 } else {
liguang4b34e3a2013-05-28 16:20:59 +0800346 addr = (uint32_t)env->regs[R_EAX];
Blue Swirl6bada5e2012-04-29 14:42:35 +0000347 }
348
349 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
350 "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
Blue Swirl052e80d2012-04-29 15:51:49 +0000351 addr, ldq_phys(addr + offsetof(struct vmcb,
352 save.fs.base)),
Blue Swirl6bada5e2012-04-29 14:42:35 +0000353 env->segs[R_FS].base);
354
Blue Swirl052e80d2012-04-29 15:51:49 +0000355 svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
356 svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
357 svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
358 svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
Blue Swirl6bada5e2012-04-29 14:42:35 +0000359
360#ifdef TARGET_X86_64
361 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
362 save.kernel_gs_base));
363 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
364 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
365 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
366#endif
367 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
368 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
369 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
370 save.sysenter_esp));
371 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
372 save.sysenter_eip));
373}
374
Blue Swirl052e80d2012-04-29 15:51:49 +0000375void helper_vmsave(CPUX86State *env, int aflag)
Blue Swirl6bada5e2012-04-29 14:42:35 +0000376{
377 target_ulong addr;
378
Blue Swirl052e80d2012-04-29 15:51:49 +0000379 cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
Blue Swirl6bada5e2012-04-29 14:42:35 +0000380
381 if (aflag == 2) {
liguang4b34e3a2013-05-28 16:20:59 +0800382 addr = env->regs[R_EAX];
Blue Swirl6bada5e2012-04-29 14:42:35 +0000383 } else {
liguang4b34e3a2013-05-28 16:20:59 +0800384 addr = (uint32_t)env->regs[R_EAX];
Blue Swirl6bada5e2012-04-29 14:42:35 +0000385 }
386
387 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
388 "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
389 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
390 env->segs[R_FS].base);
391
Blue Swirl052e80d2012-04-29 15:51:49 +0000392 svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
Blue Swirl6bada5e2012-04-29 14:42:35 +0000393 &env->segs[R_FS]);
Blue Swirl052e80d2012-04-29 15:51:49 +0000394 svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
Blue Swirl6bada5e2012-04-29 14:42:35 +0000395 &env->segs[R_GS]);
Blue Swirl052e80d2012-04-29 15:51:49 +0000396 svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
Blue Swirl6bada5e2012-04-29 14:42:35 +0000397 &env->tr);
Blue Swirl052e80d2012-04-29 15:51:49 +0000398 svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
Blue Swirl6bada5e2012-04-29 14:42:35 +0000399 &env->ldt);
400
401#ifdef TARGET_X86_64
402 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
403 env->kernelgsbase);
404 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
405 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
406 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
407#endif
408 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
409 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
410 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
411 env->sysenter_esp);
412 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
413 env->sysenter_eip);
414}
415
/* STGI: set the global interrupt flag (after an optional intercept
   check), re-enabling interrupt delivery to the host/guest. */
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
421
/* CLGI: clear the global interrupt flag (after an optional intercept
   check), blocking interrupt delivery until the next STGI. */
void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
427
/* SKINIT: secure init/startup of a trusted loader is not emulated;
   after the intercept check the instruction raises #UD. */
void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
434
Blue Swirl052e80d2012-04-29 15:51:49 +0000435void helper_invlpga(CPUX86State *env, int aflag)
Blue Swirl6bada5e2012-04-29 14:42:35 +0000436{
437 target_ulong addr;
438
Blue Swirl052e80d2012-04-29 15:51:49 +0000439 cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
Blue Swirl6bada5e2012-04-29 14:42:35 +0000440
441 if (aflag == 2) {
liguang4b34e3a2013-05-28 16:20:59 +0800442 addr = env->regs[R_EAX];
Blue Swirl6bada5e2012-04-29 14:42:35 +0000443 } else {
liguang4b34e3a2013-05-28 16:20:59 +0800444 addr = (uint32_t)env->regs[R_EAX];
Blue Swirl6bada5e2012-04-29 14:42:35 +0000445 }
446
447 /* XXX: could use the ASID to see if it is needed to do the
448 flush */
449 tlb_flush_page(env, addr);
450}
451
/*
 * Check whether the operation described by @type (an SVM_EXIT_* code)
 * is intercepted by the guest's hypervisor, and trigger a #VMEXIT via
 * helper_vmexit() if so.  helper_vmexit() does not return (it calls
 * cpu_loop_exit()), so any code after a taken intercept is dead.
 * @param carries exit-specific information (for MSR intercepts it
 * selects read vs. write — presumably 0/1; confirm against the
 * translator call sites).
 */
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    /* Fast path: not running inside a guest with intercepts armed. */
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        /* One intercept bit per control register (CR0..CR8). */
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        /* One intercept bit per debug register (DR0..DR7). */
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        /* One intercept bit per exception vector 0..31. */
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            /* The MSR permission map holds 2 bits per MSR (read/write).
               t1 is the byte offset into the map, t0 the bit offset
               within that byte; the map is split into three ranges
               matching the architectural MSR number ranges. */
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside the mapped ranges: always intercepted.
                   helper_vmexit() does not return; the assignments
                   below only silence compiler warnings. */
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        /* All other exits use one bit in the main intercept vector,
           indexed relative to SVM_EXIT_INTR. */
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}
525
/* Out-of-line wrapper so code outside the TCG helper layer can perform
   the same intercept check. */
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
531
/*
 * Check the I/O permission map (IOPM) for an access to @port and
 * trigger an SVM_EXIT_IOIO #VMEXIT if the port is intercepted.
 * @param carries the IOIO exit-information bits; bits 4..6 appear to
 * encode the access size used to build the permission mask — confirm
 * against the SVM_IOIO_* definitions in the header.  @next_eip_addend
 * is the instruction length, used to report the resume RIP in
 * exit_info_2.
 */
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        /* One permission bit per port; the mask covers all bytes of a
           multi-byte access starting at @port. */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
549
/* Note: currently only 32 bits of exit_code are used */
/*
 * #VMEXIT: switch the CPU from guest back to host state.
 *
 * Saves the current (guest) state into the VMCB, reloads host state
 * from the hsave page, records @exit_code/@exit_info_1 and the pending
 * event-injection fields for the hypervisor, then leaves the CPU loop.
 * This function never returns (cpu_loop_exit() at the end).  Statement
 * order is significant (CRs before EFER on reload, state save before
 * host restore) — do not reorder casually.
 */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    /* Record (and clear) the interrupt shadow so the hypervisor can see
       whether the guest was in an STI/MOV-SS shadow. */
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* Write back the virtual TPR and pending virtual-IRQ flag. */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    /* The host always runs protected (CR0.PE forced to 1 below too). */
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    /* Report any event that was being injected when the exit occurred,
       then clear the injection request. */
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}
714
/* Out-of-line wrapper so code outside the TCG helper layer can trigger
   a #VMEXIT.  Does not return (helper_vmexit() calls cpu_loop_exit()). */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}
719
720#endif