aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 1 | /* |
| 2 | * QEMU KVM support |
| 3 | * |
| 4 | * Copyright IBM, Corp. 2008 |
| 5 | * |
| 6 | * Authors: |
| 7 | * Anthony Liguori <aliguori@us.ibm.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2 or later. |
| 10 | * See the COPYING file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
| 14 | #ifndef QEMU_KVM_H |
| 15 | #define QEMU_KVM_H |
| 16 | |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 17 | #include "qemu/queue.h" |
Markus Armbruster | 2e5b09f | 2019-07-09 17:20:52 +0200 | [diff] [blame] | 18 | #include "hw/core/cpu.h" |
Paolo Bonzini | 4c66375 | 2015-04-08 13:30:58 +0200 | [diff] [blame] | 19 | #include "exec/memattrs.h" |
Claudio Fontana | 940e43a | 2021-02-04 17:39:24 +0100 | [diff] [blame] | 20 | #include "qemu/accel.h" |
Eduardo Habkost | db1015e | 2020-09-03 16:43:22 -0400 | [diff] [blame] | 21 | #include "qom/object.h" |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 22 | |
Thomas Huth | cbca372 | 2017-06-26 07:22:54 +0200 | [diff] [blame] | 23 | #ifdef NEED_CPU_H |
| 24 | # ifdef CONFIG_KVM |
| 25 | # include <linux/kvm.h> |
Thomas Huth | cbca372 | 2017-06-26 07:22:54 +0200 | [diff] [blame] | 26 | # define CONFIG_KVM_IS_POSSIBLE |
| 27 | # endif |
| 28 | #else |
| 29 | # define CONFIG_KVM_IS_POSSIBLE |
Michael S. Tsirkin | ca82180 | 2010-03-17 13:07:54 +0200 | [diff] [blame] | 30 | #endif |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 31 | |
Thomas Huth | cbca372 | 2017-06-26 07:22:54 +0200 | [diff] [blame] | 32 | #ifdef CONFIG_KVM_IS_POSSIBLE |
| 33 | |
/*
 * Feature-availability flags, set once while the KVM accelerator is
 * initialized.  Generic code must not read these directly; use the
 * corresponding accessor macros below instead.
 */
extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_eventfds_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
extern bool kvm_direct_msi_allowed;
extern bool kvm_ioeventfd_any_length_allowed;
extern bool kvm_msi_use_devid;
extern bool kvm_has_guest_debug;
/* KVM_GUESTDBG_SSTEP_* flags supported by the kernel (see below). */
extern int kvm_sstep_flags;

/**
 * kvm_enabled:
 *
 * Returns: true if KVM is the active accelerator for this VM.
 */
#define kvm_enabled()           (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if an in-kernel irqchip was created.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC
 * is in kernel.  This function should never be used from generic
 * target-independent code: use one of the following functions or
 * some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_irqchip_is_split:
 *
 * Returns: true if the irqchip implementation is split between
 * user and kernel space.  The details are architecture and
 * machine specific.  On PC, it means that the PIC, IOAPIC, and
 * PIT are in user space while the LAPIC is in the kernel.
 */
#define kvm_irqchip_is_split() (kvm_split_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_halt_in_kernel:
 *
 * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
 * inside of kernel space.  This only works if MP state is implemented.
 */
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

/**
 * kvm_eventfds_enabled:
 *
 * Returns: true if we can use eventfds to receive notifications
 * from a KVM CPU (ie the kernel supports eventfds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_eventfds_enabled() (kvm_eventfds_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
 * kvm_resamplefds_enabled:
 *
 * Returns: true if we can use resamplefds to inject interrupts into
 * a KVM CPU (ie the kernel supports resamplefds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd.  This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_gsi_direct_mapping:
 *
 * Returns: true if GSI direct mapping is enabled.
 */
#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM readonly memory is enabled (ie the kernel
 * supports it and we're running in a configuration that permits it).
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

/**
 * kvm_direct_msi_enabled:
 *
 * Returns: true if KVM allows direct MSI injection.
 */
#define kvm_direct_msi_enabled() (kvm_direct_msi_allowed)

/**
 * kvm_ioeventfd_any_length_enabled:
 * Returns: true if KVM allows any length io eventfd.
 */
#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)

/**
 * kvm_msi_devid_required:
 * Returns: true if KVM requires a device id to be provided while
 * defining an MSI routing entry.
 */
#define kvm_msi_devid_required() (kvm_msi_use_devid)

/*
 * Does KVM support guest debugging
 */
#define kvm_supports_guest_debug() (kvm_has_guest_debug)

/*
 * kvm_supported_sstep_flags
 * Returns: SSTEP_* flags that KVM supports for guest debug
 */
#define kvm_get_supported_sstep_flags() (kvm_sstep_flags)
| 182 | |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 183 | #else |
Thomas Huth | cbca372 | 2017-06-26 07:22:54 +0200 | [diff] [blame] | 184 | |
Jan Kiszka | 3d4b264 | 2012-01-31 19:17:52 +0100 | [diff] [blame] | 185 | #define kvm_enabled() (0) |
| 186 | #define kvm_irqchip_in_kernel() (false) |
Paolo Bonzini | 15eafc2 | 2015-12-17 17:16:08 +0100 | [diff] [blame] | 187 | #define kvm_irqchip_is_split() (false) |
Peter Maydell | 7ae26bd | 2012-07-26 15:35:11 +0100 | [diff] [blame] | 188 | #define kvm_async_interrupts_enabled() (false) |
Alexander Graf | 215e79c | 2013-04-24 22:24:12 +0200 | [diff] [blame] | 189 | #define kvm_halt_in_kernel() (false) |
Nikolay Nikolaev | 69e03ae | 2014-05-27 15:03:35 +0300 | [diff] [blame] | 190 | #define kvm_eventfds_enabled() (false) |
Peter Maydell | cc7e0dd | 2012-07-26 15:35:14 +0100 | [diff] [blame] | 191 | #define kvm_irqfds_enabled() (false) |
Eric Auger | 879904e | 2015-07-06 12:15:14 -0600 | [diff] [blame] | 192 | #define kvm_resamplefds_enabled() (false) |
Peter Maydell | 614e41b | 2012-07-26 15:35:15 +0100 | [diff] [blame] | 193 | #define kvm_msi_via_irqfd_enabled() (false) |
Peter Maydell | f3e1bed | 2012-07-26 15:35:16 +0100 | [diff] [blame] | 194 | #define kvm_gsi_routing_allowed() (false) |
Alexey Kardashevskiy | 76fe21d | 2013-09-03 18:08:25 +1000 | [diff] [blame] | 195 | #define kvm_gsi_direct_mapping() (false) |
Jordan Justen | df9c8b7 | 2013-05-29 01:27:25 -0700 | [diff] [blame] | 196 | #define kvm_readonly_mem_enabled() (false) |
Pavel Fedin | 50bf31b | 2015-10-15 16:44:50 +0300 | [diff] [blame] | 197 | #define kvm_direct_msi_enabled() (false) |
Jason Wang | 3510822 | 2015-11-06 16:02:46 +0800 | [diff] [blame] | 198 | #define kvm_ioeventfd_any_length_enabled() (false) |
Pavel Fedin | 767a554 | 2016-10-04 13:28:09 +0100 | [diff] [blame] | 199 | #define kvm_msi_devid_required() (false) |
Maxim Levitsky | 12bc5b4 | 2021-11-11 12:06:03 +0100 | [diff] [blame] | 200 | #define kvm_supports_guest_debug() (false) |
| 201 | #define kvm_get_supported_sstep_flags() (0) |
Thomas Huth | cbca372 | 2017-06-26 07:22:54 +0200 | [diff] [blame] | 202 | |
| 203 | #endif /* CONFIG_KVM_IS_POSSIBLE */ |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 204 | |
| 205 | struct kvm_run; |
Jan Kiszka | 680c1c6 | 2011-10-16 13:23:26 +0200 | [diff] [blame] | 206 | struct kvm_lapic_state; |
Frank Blaschka | 9e03a04 | 2015-01-09 09:04:40 +0100 | [diff] [blame] | 207 | struct kvm_irq_routing_entry; |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 208 | |
Jan Kiszka | 94a8d39 | 2011-01-21 21:48:17 +0100 | [diff] [blame] | 209 | typedef struct KVMCapabilityInfo { |
| 210 | const char *name; |
| 211 | int value; |
| 212 | } KVMCapabilityInfo; |
| 213 | |
| 214 | #define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP } |
| 215 | #define KVM_CAP_LAST_INFO { NULL, 0 } |
| 216 | |
Jan Kiszka | 92b4e48 | 2012-05-17 10:32:33 -0300 | [diff] [blame] | 217 | struct KVMState; |
Eduardo Habkost | 97e622d | 2020-08-25 15:20:37 -0400 | [diff] [blame] | 218 | |
| 219 | #define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm") |
Jan Kiszka | 92b4e48 | 2012-05-17 10:32:33 -0300 | [diff] [blame] | 220 | typedef struct KVMState KVMState; |
Eduardo Habkost | 8110fa1 | 2020-08-31 17:07:33 -0400 | [diff] [blame] | 221 | DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE, |
| 222 | TYPE_KVM_ACCEL) |
Eduardo Habkost | 97e622d | 2020-08-25 15:20:37 -0400 | [diff] [blame] | 223 | |
Jan Kiszka | 92b4e48 | 2012-05-17 10:32:33 -0300 | [diff] [blame] | 224 | extern KVMState *kvm_state; |
David Gibson | 3607715 | 2019-10-17 12:12:35 +1100 | [diff] [blame] | 225 | typedef struct Notifier Notifier; |
Jan Kiszka | 92b4e48 | 2012-05-17 10:32:33 -0300 | [diff] [blame] | 226 | |
Longpeng(Mike) | 9568690 | 2022-02-22 22:11:15 +0800 | [diff] [blame] | 227 | typedef struct KVMRouteChange { |
| 228 | KVMState *s; |
| 229 | int changes; |
| 230 | } KVMRouteChange; |
| 231 | |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 232 | /* external API */ |
| 233 | |
Igor Mammedov | b886559 | 2014-10-31 16:38:32 +0000 | [diff] [blame] | 234 | bool kvm_has_free_slot(MachineState *ms); |
Greg Kurz | 62dd4ed | 2017-09-21 18:00:53 +0200 | [diff] [blame] | 235 | bool kvm_has_sync_mmu(void); |
Paolo Bonzini | 00a1555 | 2010-04-01 19:57:11 +0200 | [diff] [blame] | 236 | int kvm_has_vcpu_events(void); |
| 237 | int kvm_has_robust_singlestep(void); |
Jan Kiszka | ff44f1a | 2010-03-12 15:20:49 +0100 | [diff] [blame] | 238 | int kvm_has_debugregs(void); |
Liran Alon | ebbfef2 | 2019-06-19 19:21:38 +0300 | [diff] [blame] | 239 | int kvm_max_nested_state_length(void); |
Jan Kiszka | 8a7c739 | 2012-03-02 20:28:48 +0100 | [diff] [blame] | 240 | int kvm_has_pit_state2(void); |
Stefan Hajnoczi | d2f2b8a | 2011-01-10 13:50:05 +0200 | [diff] [blame] | 241 | int kvm_has_many_ioeventfds(void); |
Jan Kiszka | 84b058d | 2011-10-15 11:49:47 +0200 | [diff] [blame] | 242 | int kvm_has_gsi_routing(void); |
Jan Kiszka | 3ab7384 | 2012-08-27 08:28:39 +0200 | [diff] [blame] | 243 | int kvm_has_intx_set_mask(void); |
Paolo Bonzini | 00a1555 | 2010-04-01 19:57:11 +0200 | [diff] [blame] | 244 | |
Alexander Graf | 5d721b7 | 2017-07-11 11:21:26 +0100 | [diff] [blame] | 245 | /** |
| 246 | * kvm_arm_supports_user_irq |
| 247 | * |
| 248 | * Not all KVM implementations support notifications for kernel generated |
| 249 | * interrupt events to user space. This function indicates whether the current |
| 250 | * KVM implementation does support them. |
| 251 | * |
| 252 | * Returns: true if KVM supports using kernel generated IRQs from user space |
| 253 | */ |
| 254 | bool kvm_arm_supports_user_irq(void); |
| 255 | |
Brijesh Singh | b20e378 | 2018-03-08 06:48:44 -0600 | [diff] [blame] | 256 | |
Philippe Mathieu-Daudé | 82bd4ca | 2021-05-16 19:01:46 +0200 | [diff] [blame] | 257 | int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); |
| 258 | int kvm_on_sigbus(int code, void *addr); |
| 259 | |
Andreas Färber | 504134d | 2012-12-17 06:38:45 +0100 | [diff] [blame] | 260 | #ifdef NEED_CPU_H |
Paolo Bonzini | 33c1187 | 2016-03-15 16:58:45 +0100 | [diff] [blame] | 261 | #include "cpu.h" |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 262 | |
Igor Mammedov | c4cfef5 | 2013-04-23 10:29:35 +0200 | [diff] [blame] | 263 | void kvm_flush_coalesced_mmio_buffer(void); |
| 264 | |
Andreas Färber | 6227881 | 2013-06-27 17:12:06 +0200 | [diff] [blame] | 265 | int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 266 | target_ulong len, int type); |
Andreas Färber | 6227881 | 2013-06-27 17:12:06 +0200 | [diff] [blame] | 267 | int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 268 | target_ulong len, int type); |
Andreas Färber | 1d5791f | 2013-05-27 14:40:48 +0200 | [diff] [blame] | 269 | void kvm_remove_all_breakpoints(CPUState *cpu); |
Stefan Weil | 38e478e | 2013-07-25 20:50:21 +0200 | [diff] [blame] | 270 | int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap); |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 271 | |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 272 | /* internal API */ |
| 273 | |
aliguori | 984b518 | 2008-11-13 19:21:00 +0000 | [diff] [blame] | 274 | int kvm_ioctl(KVMState *s, int type, ...); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 275 | |
aliguori | 984b518 | 2008-11-13 19:21:00 +0000 | [diff] [blame] | 276 | int kvm_vm_ioctl(KVMState *s, int type, ...); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 277 | |
Andreas Färber | 1bc2265 | 2012-10-31 06:06:49 +0100 | [diff] [blame] | 278 | int kvm_vcpu_ioctl(CPUState *cpu, int type, ...); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 279 | |
Christoffer Dall | 0a6a7cc | 2014-02-26 17:20:00 +0000 | [diff] [blame] | 280 | /** |
| 281 | * kvm_device_ioctl - call an ioctl on a kvm device |
| 282 | * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE |
| 283 | * @type: The device-ctrl ioctl number |
| 284 | * |
| 285 | * Returns: -errno on error, nonnegative on success |
| 286 | */ |
| 287 | int kvm_device_ioctl(int fd, int type, ...); |
| 288 | |
| 289 | /** |
Dominik Dingel | d0a073a | 2015-03-12 13:53:49 +0100 | [diff] [blame] | 290 | * kvm_vm_check_attr - check for existence of a specific vm attribute |
| 291 | * @s: The KVMState pointer |
| 292 | * @group: the group |
| 293 | * @attr: the attribute of that group to query for |
| 294 | * |
| 295 | * Returns: 1 if the attribute exists |
| 296 | * 0 if the attribute either does not exist or if the vm device |
| 297 | * interface is unavailable |
| 298 | */ |
| 299 | int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr); |
| 300 | |
| 301 | /** |
Pavel Fedin | 4b3cfe7 | 2015-09-24 01:29:36 +0100 | [diff] [blame] | 302 | * kvm_device_check_attr - check for existence of a specific device attribute |
| 303 | * @fd: The device file descriptor |
| 304 | * @group: the group |
| 305 | * @attr: the attribute of that group to query for |
| 306 | * |
| 307 | * Returns: 1 if the attribute exists |
| 308 | * 0 if the attribute either does not exist or if the vm device |
| 309 | * interface is unavailable |
| 310 | */ |
| 311 | int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr); |
| 312 | |
| 313 | /** |
Greg Kurz | 84b6ea0 | 2019-09-12 15:57:26 +0200 | [diff] [blame] | 314 | * kvm_device_access - set or get value of a specific device attribute |
Pavel Fedin | 4b3cfe7 | 2015-09-24 01:29:36 +0100 | [diff] [blame] | 315 | * @fd: The device file descriptor |
| 316 | * @group: the group |
| 317 | * @attr: the attribute of that group to set or get |
| 318 | * @val: pointer to a storage area for the value |
| 319 | * @write: true for set and false for get operation |
Eric Auger | 556969e | 2017-06-13 14:57:00 +0100 | [diff] [blame] | 320 | * @errp: error object handle |
Pavel Fedin | 4b3cfe7 | 2015-09-24 01:29:36 +0100 | [diff] [blame] | 321 | * |
Eric Auger | 556969e | 2017-06-13 14:57:00 +0100 | [diff] [blame] | 322 | * Returns: 0 on success |
| 323 | * < 0 on error |
| 324 | * Use kvm_device_check_attr() in order to check for the availability |
| 325 | * of optional attributes. |
Pavel Fedin | 4b3cfe7 | 2015-09-24 01:29:36 +0100 | [diff] [blame] | 326 | */ |
Eric Auger | 556969e | 2017-06-13 14:57:00 +0100 | [diff] [blame] | 327 | int kvm_device_access(int fd, int group, uint64_t attr, |
| 328 | void *val, bool write, Error **errp); |
Pavel Fedin | 4b3cfe7 | 2015-09-24 01:29:36 +0100 | [diff] [blame] | 329 | |
| 330 | /** |
Christoffer Dall | 0a6a7cc | 2014-02-26 17:20:00 +0000 | [diff] [blame] | 331 | * kvm_create_device - create a KVM device for the device control API |
| 332 | * @KVMState: The KVMState pointer |
| 333 | * @type: The KVM device type (see Documentation/virtual/kvm/devices in the |
| 334 | * kernel source) |
| 335 | * @test: If true, only test if device can be created, but don't actually |
| 336 | * create the device. |
| 337 | * |
| 338 | * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd; |
| 339 | */ |
| 340 | int kvm_create_device(KVMState *s, uint64_t type, bool test); |
| 341 | |
Peter Xu | 29039ac | 2016-03-30 17:27:24 +0100 | [diff] [blame] | 342 | /** |
| 343 | * kvm_device_supported - probe whether KVM supports specific device |
| 344 | * |
| 345 | * @vmfd: The fd handler for VM |
| 346 | * @type: type of device |
| 347 | * |
| 348 | * @return: true if supported, otherwise false. |
| 349 | */ |
| 350 | bool kvm_device_supported(int vmfd, uint64_t type); |
Christoffer Dall | 0a6a7cc | 2014-02-26 17:20:00 +0000 | [diff] [blame] | 351 | |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 352 | /* Arch specific hooks */ |
| 353 | |
Jan Kiszka | 94a8d39 | 2011-01-21 21:48:17 +0100 | [diff] [blame] | 354 | extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; |
| 355 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 356 | void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run); |
Paolo Bonzini | 4c66375 | 2015-04-08 13:30:58 +0200 | [diff] [blame] | 357 | MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 358 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 359 | int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 360 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 361 | int kvm_arch_process_async_events(CPUState *cpu); |
Marcelo Tosatti | 0af691d | 2010-05-04 09:45:27 -0300 | [diff] [blame] | 362 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 363 | int kvm_arch_get_registers(CPUState *cpu); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 364 | |
Jan Kiszka | ea375f9 | 2010-03-01 19:10:30 +0100 | [diff] [blame] | 365 | /* state subset only touched by the VCPU itself during runtime */ |
| 366 | #define KVM_PUT_RUNTIME_STATE 1 |
| 367 | /* state subset modified during VCPU reset */ |
| 368 | #define KVM_PUT_RESET_STATE 2 |
| 369 | /* full state set, modified during initialization or on vmload */ |
| 370 | #define KVM_PUT_FULL_STATE 3 |
| 371 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 372 | int kvm_arch_put_registers(CPUState *cpu, int level); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 373 | |
Marcel Apfelbaum | b16565b | 2015-02-04 17:43:51 +0200 | [diff] [blame] | 374 | int kvm_arch_init(MachineState *ms, KVMState *s); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 375 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 376 | int kvm_arch_init_vcpu(CPUState *cpu); |
Liran Alon | b1115c9 | 2019-06-19 19:21:32 +0300 | [diff] [blame] | 377 | int kvm_arch_destroy_vcpu(CPUState *cpu); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 378 | |
Greg Kurz | 41264b3 | 2016-04-26 15:41:04 +0200 | [diff] [blame] | 379 | bool kvm_vcpu_id_is_valid(int vcpu_id); |
| 380 | |
Eduardo Habkost | b164e48 | 2013-01-22 18:25:01 -0200 | [diff] [blame] | 381 | /* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */ |
| 382 | unsigned long kvm_arch_vcpu_id(CPUState *cpu); |
| 383 | |
Dongjiu Geng | e24fd07 | 2020-05-12 11:06:08 +0800 | [diff] [blame] | 384 | #ifdef KVM_HAVE_MCE_INJECTION |
Paolo Bonzini | 2ae41db | 2017-02-08 12:48:54 +0100 | [diff] [blame] | 385 | void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); |
| 386 | #endif |
Marcelo Tosatti | c0532a7 | 2010-10-11 15:31:21 -0300 | [diff] [blame] | 387 | |
Jan Kiszka | 84b058d | 2011-10-15 11:49:47 +0200 | [diff] [blame] | 388 | void kvm_arch_init_irq_routing(KVMState *s); |
| 389 | |
Frank Blaschka | 9e03a04 | 2015-01-09 09:04:40 +0100 | [diff] [blame] | 390 | int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, |
Pavel Fedin | dc9f06c | 2015-10-15 16:44:52 +0300 | [diff] [blame] | 391 | uint64_t address, uint32_t data, PCIDevice *dev); |
Frank Blaschka | 9e03a04 | 2015-01-09 09:04:40 +0100 | [diff] [blame] | 392 | |
Peter Xu | 38d8749 | 2016-07-14 13:56:31 +0800 | [diff] [blame] | 393 | /* Notify arch about newly added MSI routes */ |
| 394 | int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, |
| 395 | int vector, PCIDevice *dev); |
| 396 | /* Notify arch about released MSI routes */ |
| 397 | int kvm_arch_release_virq_post(int virq); |
| 398 | |
Eric Auger | 1850b6b | 2015-06-02 14:56:23 +0100 | [diff] [blame] | 399 | int kvm_arch_msi_data_to_gsi(uint32_t data); |
| 400 | |
Peter Maydell | 3889c3f | 2012-07-26 15:35:12 +0100 | [diff] [blame] | 401 | int kvm_set_irq(KVMState *s, int irq, int level); |
Jan Kiszka | 04fa27f | 2012-05-16 15:41:10 -0300 | [diff] [blame] | 402 | int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg); |
Jan Kiszka | 84b058d | 2011-10-15 11:49:47 +0200 | [diff] [blame] | 403 | |
Jan Kiszka | 1df186d | 2012-05-17 10:32:32 -0300 | [diff] [blame] | 404 | void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin); |
Jan Kiszka | 84b058d | 2011-10-15 11:49:47 +0200 | [diff] [blame] | 405 | |
David Gibson | 3607715 | 2019-10-17 12:12:35 +1100 | [diff] [blame] | 406 | void kvm_irqchip_add_change_notifier(Notifier *n); |
| 407 | void kvm_irqchip_remove_change_notifier(Notifier *n); |
| 408 | void kvm_irqchip_change_notify(void); |
| 409 | |
Jan Kiszka | 680c1c6 | 2011-10-16 13:23:26 +0200 | [diff] [blame] | 410 | void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic); |
| 411 | |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 412 | struct kvm_guest_debug; |
| 413 | struct kvm_debug_exit_arch; |
| 414 | |
/* A software breakpoint tracked by the KVM guest-debug support. */
struct kvm_sw_breakpoint {
    target_ulong pc;            /* guest address where the breakpoint sits */
    target_ulong saved_insn;    /* original instruction at @pc — presumably
                                 * restored on removal by the arch hooks;
                                 * see kvm_arch_remove_sw_breakpoint() */
    int use_count;              /* insert/remove reference count */
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;  /* per-CPU breakpoint list link */
};
| 421 | |
Andreas Färber | a60f24b | 2012-12-01 05:35:08 +0100 | [diff] [blame] | 422 | struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 423 | target_ulong pc); |
| 424 | |
Andreas Färber | a60f24b | 2012-12-01 05:35:08 +0100 | [diff] [blame] | 425 | int kvm_sw_breakpoints_active(CPUState *cpu); |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 426 | |
Andreas Färber | 80b7cd7 | 2013-06-19 17:37:31 +0200 | [diff] [blame] | 427 | int kvm_arch_insert_sw_breakpoint(CPUState *cpu, |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 428 | struct kvm_sw_breakpoint *bp); |
Andreas Färber | 80b7cd7 | 2013-06-19 17:37:31 +0200 | [diff] [blame] | 429 | int kvm_arch_remove_sw_breakpoint(CPUState *cpu, |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 430 | struct kvm_sw_breakpoint *bp); |
| 431 | int kvm_arch_insert_hw_breakpoint(target_ulong addr, |
| 432 | target_ulong len, int type); |
| 433 | int kvm_arch_remove_hw_breakpoint(target_ulong addr, |
| 434 | target_ulong len, int type); |
| 435 | void kvm_arch_remove_all_hw_breakpoints(void); |
| 436 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 437 | void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg); |
aliguori | e22a25c | 2009-03-12 20:12:48 +0000 | [diff] [blame] | 438 | |
Andreas Färber | 20d695a | 2012-10-31 06:57:49 +0100 | [diff] [blame] | 439 | bool kvm_arch_stop_on_emulation_error(CPUState *cpu); |
Gleb Natapov | 4513d92 | 2010-05-10 11:21:34 +0300 | [diff] [blame] | 440 | |
Anthony Liguori | ad7b8b3 | 2009-05-08 15:33:24 -0500 | [diff] [blame] | 441 | int kvm_check_extension(KVMState *s, unsigned int extension); |
| 442 | |
Alexander Graf | 7d0a07f | 2014-07-14 19:15:15 +0200 | [diff] [blame] | 443 | int kvm_vm_check_extension(KVMState *s, unsigned int extension); |
| 444 | |
Cornelia Huck | 40f1ee2 | 2013-10-23 18:19:26 +0200 | [diff] [blame] | 445 | #define kvm_vm_enable_cap(s, capability, cap_flags, ...) \ |
| 446 | ({ \ |
| 447 | struct kvm_enable_cap cap = { \ |
| 448 | .cap = capability, \ |
| 449 | .flags = cap_flags, \ |
| 450 | }; \ |
| 451 | uint64_t args_tmp[] = { __VA_ARGS__ }; \ |
Greg Kurz | 1b7ac7c | 2017-08-07 13:36:44 +0200 | [diff] [blame] | 452 | size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \ |
| 453 | memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \ |
Cornelia Huck | 40f1ee2 | 2013-10-23 18:19:26 +0200 | [diff] [blame] | 454 | kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap); \ |
| 455 | }) |
| 456 | |
| 457 | #define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...) \ |
| 458 | ({ \ |
| 459 | struct kvm_enable_cap cap = { \ |
| 460 | .cap = capability, \ |
| 461 | .flags = cap_flags, \ |
| 462 | }; \ |
| 463 | uint64_t args_tmp[] = { __VA_ARGS__ }; \ |
Greg Kurz | 1b7ac7c | 2017-08-07 13:36:44 +0200 | [diff] [blame] | 464 | size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \ |
| 465 | memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \ |
Cornelia Huck | 40f1ee2 | 2013-10-23 18:19:26 +0200 | [diff] [blame] | 466 | kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \ |
| 467 | }) |
| 468 | |
Jan Kiszka | ba9bc59 | 2011-06-08 16:11:05 +0200 | [diff] [blame] | 469 | uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function, |
Sheng Yang | c958a8b | 2010-06-17 15:18:13 +0800 | [diff] [blame] | 470 | uint32_t index, int reg); |
Paolo Bonzini | ede146c | 2019-07-01 17:38:54 +0200 | [diff] [blame] | 471 | uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index); |
Robert Hoo | f57bceb | 2018-10-15 12:47:23 +0800 | [diff] [blame] | 472 | |
James Hogan | 97577fd | 2013-08-27 12:19:10 +0100 | [diff] [blame] | 473 | |
James Hogan | aed6efb | 2014-06-17 23:10:31 +0100 | [diff] [blame] | 474 | void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len); |
| 475 | |
James Hogan | 97577fd | 2013-08-27 12:19:10 +0100 | [diff] [blame] | 476 | #if !defined(CONFIG_USER_ONLY) |
| 477 | int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr, |
| 478 | hwaddr *phys_addr); |
| 479 | #endif |
| 480 | |
| 481 | #endif /* NEED_CPU_H */ |
| 482 | |
Andreas Färber | dd1750d | 2013-05-01 13:45:44 +0200 | [diff] [blame] | 483 | void kvm_cpu_synchronize_state(CPUState *cpu); |
Avi Kivity | b827df5 | 2009-05-03 17:04:01 +0300 | [diff] [blame] | 484 | |
Paolo Bonzini | 18268b6 | 2017-02-09 09:41:14 +0100 | [diff] [blame] | 485 | void kvm_init_cpu_signals(CPUState *cpu); |
| 486 | |
Peter Xu | d1f6af6 | 2016-07-14 13:56:30 +0800 | [diff] [blame] | 487 | /** |
| 488 | * kvm_irqchip_add_msi_route - Add MSI route for specific vector |
Longpeng(Mike) | def4c55 | 2022-02-22 22:11:16 +0800 | [diff] [blame] | 489 | * @c: KVMRouteChange instance. |
Peter Xu | d1f6af6 | 2016-07-14 13:56:30 +0800 | [diff] [blame] | 490 | * @vector: which vector to add. This can be either MSI/MSIX |
| 491 | * vector. The function will automatically detect whether |
| 492 | * MSI/MSIX is enabled, and fetch corresponding MSI |
| 493 | * message. |
| 494 | * @dev: Owner PCI device to add the route. If @dev is specified |
| 495 | * as @NULL, an empty MSI message will be inited. |
| 496 | * @return: virq (>=0) when success, errno (<0) when failed. |
| 497 | */ |
Longpeng(Mike) | def4c55 | 2022-02-22 22:11:16 +0800 | [diff] [blame] | 498 | int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); |
Pavel Fedin | dc9f06c | 2015-10-15 16:44:52 +0300 | [diff] [blame] | 499 | int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, |
| 500 | PCIDevice *dev); |
Peter Xu | 3f1fea0 | 2016-07-14 13:56:33 +0800 | [diff] [blame] | 501 | void kvm_irqchip_commit_routes(KVMState *s); |
Longpeng(Mike) | 9568690 | 2022-02-22 22:11:15 +0800 | [diff] [blame] | 502 | |
| 503 | static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s) |
| 504 | { |
| 505 | return (KVMRouteChange) { .s = s, .changes = 0 }; |
| 506 | } |
| 507 | |
| 508 | static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c) |
| 509 | { |
| 510 | if (c->changes) { |
| 511 | kvm_irqchip_commit_routes(c->s); |
| 512 | c->changes = 0; |
| 513 | } |
| 514 | } |
| 515 | |
Jan Kiszka | 1e2aa8b | 2012-05-17 10:32:34 -0300 | [diff] [blame] | 516 | void kvm_irqchip_release_virq(KVMState *s, int virq); |
Jan Kiszka | 39853bb | 2012-05-17 10:32:36 -0300 | [diff] [blame] | 517 | |
Cornelia Huck | d426d9f | 2013-07-15 17:45:03 +0200 | [diff] [blame] | 518 | int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter); |
Andrey Smetanin | 977a8d9 | 2015-11-10 15:52:42 +0300 | [diff] [blame] | 519 | int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint); |
Cornelia Huck | d426d9f | 2013-07-15 17:45:03 +0200 | [diff] [blame] | 520 | |
Eric Auger | 1c9b71a | 2015-07-06 12:15:13 -0600 | [diff] [blame] | 521 | int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, |
| 522 | EventNotifier *rn, int virq); |
| 523 | int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, |
| 524 | int virq); |
Eric Auger | 197e352 | 2015-07-06 12:15:13 -0600 | [diff] [blame] | 525 | int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, |
| 526 | EventNotifier *rn, qemu_irq irq); |
| 527 | int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, |
| 528 | qemu_irq irq); |
| 529 | void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi); |
Jason Baron | d8ee038 | 2012-11-14 15:54:02 -0500 | [diff] [blame] | 530 | void kvm_pc_setup_irq_routing(bool pci_enabled); |
Alexander Graf | 7b77459 | 2013-04-16 15:58:13 +0200 | [diff] [blame] | 531 | void kvm_init_irq_routing(KVMState *s); |
Christoffer Dall | d6032e0 | 2014-02-26 17:20:00 +0000 | [diff] [blame] | 532 | |
Paolo Bonzini | 4376c40 | 2019-11-13 11:17:12 +0100 | [diff] [blame] | 533 | bool kvm_kernel_irqchip_allowed(void); |
| 534 | bool kvm_kernel_irqchip_required(void); |
| 535 | bool kvm_kernel_irqchip_split(void); |
| 536 | |
Christoffer Dall | d6032e0 | 2014-02-26 17:20:00 +0000 | [diff] [blame] | 537 | /** |
| 538 | * kvm_arch_irqchip_create: |
| 539 | * @KVMState: The KVMState pointer |
| 540 | * |
| 541 | * Allow architectures to create an in-kernel irq chip themselves. |
| 542 | * |
| 543 | * Returns: < 0: error |
| 544 | * 0: irq chip was not created |
| 545 | * > 0: irq chip was created |
| 546 | */ |
Paolo Bonzini | 4376c40 | 2019-11-13 11:17:12 +0100 | [diff] [blame] | 547 | int kvm_arch_irqchip_create(KVMState *s); |
Cornelia Huck | ada4135 | 2014-05-09 10:06:46 +0200 | [diff] [blame] | 548 | |
| 549 | /** |
| 550 | * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl |
| 551 | * @id: The register ID |
| 552 | * @source: The pointer to the value to be set. It must point to a variable |
| 553 | * of the correct type/size for the register being accessed. |
| 554 | * |
| 555 | * Returns: 0 on success, or a negative errno on failure. |
| 556 | */ |
| 557 | int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source); |
| 558 | |
| 559 | /** |
| 560 | * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl |
| 561 | * @id: The register ID |
| 562 | * @target: The pointer where the value is to be stored. It must point to a |
| 563 | * variable of the correct type/size for the register being accessed. |
| 564 | * |
| 565 | * Returns: 0 on success, or a negative errno on failure. |
| 566 | */ |
| 567 | int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target); |
Sam Bobroff | c64abd1 | 2017-03-20 10:46:43 +1100 | [diff] [blame] | 568 | struct ppc_radix_page_info *kvm_get_radix_page_info(void); |
Bharata B Rao | 44f2e6c | 2016-06-01 15:21:24 +0530 | [diff] [blame] | 569 | int kvm_get_max_memslots(void); |
Peter Xu | c82d9d4 | 2020-03-18 10:52:03 -0400 | [diff] [blame] | 570 | |
| 571 | /* Notify resamplefd for EOI of specific interrupts. */ |
| 572 | void kvm_resample_fd_notify(int gsi); |
| 573 | |
Tom Lendacky | 92a5199 | 2021-01-26 11:36:47 -0600 | [diff] [blame] | 574 | /** |
| 575 | * kvm_cpu_check_are_resettable - return whether CPUs can be reset |
| 576 | * |
| 577 | * Returns: true: CPUs are resettable |
| 578 | * false: CPUs are not resettable |
| 579 | */ |
| 580 | bool kvm_cpu_check_are_resettable(void); |
| 581 | |
| 582 | bool kvm_arch_cpu_check_are_resettable(void); |
| 583 | |
Hyman Huang(黄勇) | 7786ae4 | 2021-06-29 16:01:18 +0000 | [diff] [blame] | 584 | bool kvm_dirty_ring_enabled(void); |
aliguori | 0533044 | 2008-11-05 16:29:27 +0000 | [diff] [blame] | 585 | #endif |