/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"

struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}

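/*
 * Apply a new guest-visible SynIC configuration: record the control enable
 * bit and (re)map the message and event-flag overlay pages whenever their
 * guest physical addresses change.
 */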
static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

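/*
 * Entry point for the per-target SynIC MSR handling (e.g. writes to
 * SCONTROL/SIMP/SIEFP forwarded by the accelerator); a no-op for CPUs
 * that have no SynIC child object.
 */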
void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);
    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    dc->reset = synic_reset;
    dc->user_creatable = false;
}

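/* Instantiate a SynIC as a "synic" child object of @cs and realize it. */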
void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_legacy_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers). To guarantee
 * serialization with both KVM vcpu and the guest cpu, the messages are first
 * staged in an intermediate area and then posted to the SynIC message page in
 * the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg into the
         * msg slot and notify the guest, records the status, marks the
         * posting done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
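
/*
 * A hypothetical message producer (the names below are illustrative, not
 * part of this file) would drive this state machine roughly like so:
 *
 *     static void my_msg_cb(void *data, int status)
 *     {
 *         if (status == -EAGAIN) {
 *             // slot was busy; repost once the guest ACKs via EOM
 *         }
 *     }
 *
 *     HvSintRoute *route = hyperv_sint_route_new(vp_index, sint,
 *                                                my_msg_cb, NULL);
 *     struct hyperv_message msg = {
 *         .header.message_type = ... ,
 *     };
 *     hyperv_post_msg(route, &msg);   // -EAGAIN if the staging area is busy
 */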

struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy we
     * set the msg_pending flag in it, so the notification will come from KVM
     * via sint_ack_notifier once the guest does EOM
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to guest in the
 * vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}

static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one with
     * -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno > HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}

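/*
 * Create a SINT route for the given (@vp_index, @sint) pair.  When @cb is
 * non-NULL, message staging is set up and @cb(@cb_data, status) is invoked
 * from a BH after each posted message has been processed.  The GSI/irqfd
 * plumbing is only wired up while the SynIC control enable bit is set;
 * otherwise the route is registered without a GSI.
 */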
HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or we need to set up a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto cleanup;
    }

    /* We need to set up a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
cleanup:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_notifier) {
        if (ack_event_initialized) {
            event_notifier_set_handler(ack_notifier, NULL);
            event_notifier_cleanup(ack_notifier);
        }

        g_free(sint_route->staged_msg);
    }

    g_free(sint_route);
    return NULL;
}

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

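/*
 * Trigger the SINT: signal the irqfd backing the route, or silently succeed
 * for routes created without a GSI (SynIC control disabled).
 */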
int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}

typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

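/*
 * Register (@handler != NULL) or unregister (@handler == NULL) the handler
 * for HvPostMessage hypercalls on @conn_id.  Returns -EEXIST when the
 * connection already has a handler, and -ENOENT when unregistering an
 * unknown connection.  Readers traverse the list under RCU, hence the
 * RCU-aware removal.
 */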
int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

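/*
 * Handle the HvPostMessage hypercall: validate and map the input page, then
 * dispatch to the handler registered for the target connection id under the
 * RCU read lock.
 */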
uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

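/*
 * Bind @notifier to HvSignalEvent hypercalls on @conn_id (or unbind with
 * @notifier == NULL).  When the kernel supports KVM_CAP_HYPERV_EVENTFD the
 * signaling is offloaded to KVM via an eventfd; otherwise the hypercall
 * falls back to the slower userspace dispatch above.
 */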
int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}

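/*
 * Handle an HvSignalEvent hypercall processed in userspace: validate the
 * connection id and set the registered notifier, if any.
 */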
uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}

static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}

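/*
 * Handle the syndbg reset-debug-session hypercall: query the registered
 * syndbg handler for its connection info and fill the guest-supplied
 * output page.
 */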
uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}

uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast)
{
    uint16_t ret;
    struct hyperv_post_debug_data_input *post_data_in = NULL;
    struct hyperv_post_debug_data_output *post_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*post_data_in);
    post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!post_data_in || in_len < sizeof(*post_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) {
        ret = HV_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }

    out_len = sizeof(*post_data_out);
    post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!post_data_out || out_len < sizeof(*post_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in);
    msg.u.send.count = post_data_in->count;
    msg.u.send.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    post_data_out->pending_count = msg.u.send.pending_count;
    ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS :
                                         HV_STATUS_SUCCESS;
cleanup:
    if (post_data_out) {
        cpu_physical_memory_unmap(post_data_out,
                                  sizeof(*post_data_out), 1, out_len);
    }

    if (post_data_in) {
        cpu_physical_memory_unmap(post_data_in,
                                  sizeof(*post_data_in), 0, in_len);
    }

    return ret;
}

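/*
 * Non-hypercall helpers for the emulated syndbg device to exchange debug
 * data with the registered handler; unlike the hypercall paths above these
 * pass the buffers through without the page-header framing (is_raw = false).
 */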
uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}