/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"
#include "trace/trace-root.h"

QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork. */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

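/* Take/release the lock protecting the global CPU list. */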
void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

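/* Return one past the highest cpu_index currently in use, i.e. the next
 * index to hand out.  Called with qemu_cpu_list_lock held (see
 * cpu_list_add).
 */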
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue);
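/* Incremented whenever a CPU is added to or removed from the list, so
 * that readers can detect that the set of CPUs has changed.
 */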
static unsigned int cpu_list_generation_id;

unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}

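/* Add @cpu to the global CPU list.  If the caller did not assign a
 * cpu_index, pick the next free one; mixing manual and automatic index
 * assignment is not allowed.
 */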
void cpu_list_add(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node);
    cpu_list_generation_id++;
}

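/* Remove @cpu from the global CPU list; a CPU that was never added is
 * silently ignored.
 */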
void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_generation_id++;
}

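/* Look up a CPU by cpu_index; return NULL if there is no such CPU. */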
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

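/* A deferred work item, queued on a CPU's work_list.  "free" items are
 * heap-allocated and released after they run; "exclusive" items run
 * within start_exclusive/end_exclusive; "done" signals completion to a
 * synchronous waiter in do_run_on_cpu.
 */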
struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

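/* Append @wi to @cpu's work list and kick the CPU so that it notices. */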
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

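/* Run @func on @cpu and wait for it to finish.  If the caller already is
 * @cpu's own thread, @func runs immediately; otherwise the item is
 * queued and the caller sleeps on qemu_work_cond, dropping @mutex while
 * it waits.
 */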
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_load_acquire(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}

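/* Queue @func to run on @cpu asynchronously.  The work item is
 * heap-allocated and freed after it has run.
 */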
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held. */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec. */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing. */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running. */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* We can release the mutex: no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}

/* Finish an exclusive operation. */
void end_exclusive(void)
{
    current_cpu->exclusive_context_count--;
    if (current_cpu->exclusive_context_count) {
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

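/* A sketch of typical usage (hypothetical caller, not part of this
 * file): stop every other CPU, mutate state they might be reading, then
 * let them resume.
 *
 *     start_exclusive();
 *     ... update data that concurrently running vCPUs might read ...
 *     end_exclusive();
 *
 * Nesting within one thread is allowed; exclusive_context_count tracks
 * the depth.
 */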
/* Wait for exclusive ops to finish, and begin cpu execution. */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero. */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops. */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}

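/* Queue @func to run on @cpu inside an exclusive section: all other
 * CPUs are stopped while it runs, so it may safely modify state that
 * concurrently running vCPUs read.
 */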
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}

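/* Drain @cpu's work list, normally from the CPU's own thread.
 * Exclusive items run outside the BQL within start_exclusive/
 * end_exclusive; synchronous waiters in do_run_on_cpu are woken at the
 * end.
 */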
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}