/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;

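/*
 * With -icount align, sleep the host thread whenever the guest (icount)
 * clock has run more than VM_CLOCK_ADVANCE ahead of real time, so that
 * the two clocks stay roughly in sync.
 */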
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

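/*
 * Record the initial offset between the virtual and real-time clocks and
 * update the max_delay/max_advance statistics reported for -icount align.
 */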
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /*
     * Print at most every 2s if the guest is late.  We limit the number
     * of printed messages to MAX_NB_PRINTS (currently 100).
     */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

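/* Accessors for the per-vCPU compile flags applied to every generated TB. */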
bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
{
    return cpu->tcg_cflags & flags;
}

void tcg_cflags_set(CPUState *cpu, uint32_t flags)
{
    cpu->tcg_cflags |= flags;
}

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

struct tb_desc {
    vaddr pc;
    uint64_t cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

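/*
 * qht comparison callback: match a candidate TB against the lookup key in
 * struct tb_desc, including the physical address of a possible second page.
 */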
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            vaddr virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

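/*
 * Look up a TB in the global hash table, keyed on the physical PC plus the
 * CPU state captured in tb_desc.  Returns NULL when the guest PC has no
 * backing RAM page or when no matching TB has been generated yet.
 */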
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu_env(cpu);
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

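/*
 * Two-level lookup for the next TB: first the per-vCPU jump cache
 * (cpu->tb_jmp_cache), then the global hash table via tb_htable_lookup();
 * a hash-table hit refreshes the jump cache entry.
 */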
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    tb = qatomic_read(&jc->array[hash].tb);
    if (likely(tb &&
               jc->array[hash].pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb_cflags(tb) == cflags)) {
        goto hit;
    }

    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }

    jc->array[hash].pc = pc;
    qatomic_set(&jc->array[hash].tb, tb);

hit:
    /*
     * As long as tb is not NULL, the contents are consistent.  Therefore,
     * the virtual PC has to match for non-CF_PCREL translations.
     */
    assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
    return tb;
}

static void log_cpu_exec(vaddr pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}

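/*
 * Decide how a pending breakpoint affects the next TB: return true (with
 * EXCP_DEBUG pending) for an exact hit, or adjust *cflags so that we
 * single-step through a page that merely contains a breakpoint.
 */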
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
                assert(tcg_ops->debug_check_breakpoint);
                match_bp = tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | CF_BP_PAGE | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    /*
     * By definition we've just finished a TB, so I/O is OK.
     * Avoid the possibility of calling cpu_io_recompile() if
     * a page table walk triggered by tb_lookup() calling
     * probe_access_internal() happens to touch an MMIO device.
     * The next TB, if we chain to it, will clear the flag again.
     */
    cpu->neg.can_do_io = true;
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
    cpu->neg.can_do_io = true;
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = cpu->cc;
        const TCGCPUOps *tcg_ops = cc->tcg_ops;

        if (tcg_ops->synchronize_from_tb) {
            tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            vaddr pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p [%016"
                         VADDR_PRIx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}


static void cpu_exec_enter(CPUState *cpu)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->cpu_exec_enter) {
        tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->cpu_exec_exit) {
        tcg_ops->cpu_exec_exit(cpu);
    }
}

static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
    /* Non-buggy compilers preserve this; assert the correct value. */
    g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
    clear_helper_retaddr();
    if (have_mmap_lock()) {
        mmap_unlock();
    }
#else
    /*
     * For softmmu, a tlb_fill fault during translation will land here,
     * and we need to release any page locks held.  In system mode we
     * have one tcg_ctx per thread, so we know it was this cpu doing
     * the translation.
     *
     * Alternative 1: Install a cleanup to be called via an exception
     * handling safe longjmp.  It seems plausible that all our hosts
     * support such a thing.  We'd have to properly register unwind info
     * for the JIT for EH, rather than just for GDB.
     *
     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
     * capture the cpu_loop_exit longjmp, perform the cleanup, and
     * jump again to arrive here.
     */
    if (tcg_ctx->gen_tb) {
        tb_unlock_pages(tcg_ctx->gen_tb);
        tcg_ctx->gen_tb = NULL;
    }
#endif
    if (bql_locked()) {
        bql_unlock();
    }
    assert_no_pages_locked();
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        cpu_exec_longjmp_cleanup(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}

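/*
 * Chain TB 'tb' to 'tb_next' through jump slot n: claim the jmp_dest slot,
 * patch the native jump, and record the link so it can be undone when
 * tb_next is invalidated.
 */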
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

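/*
 * Returns true if the vCPU should remain halted (nothing to execute);
 * the halted check only applies to system-mode emulation.
 */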
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
        bool leave_halt = tcg_ops->cpu_exec_halt(cpu);

        if (!leave_halt) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (tcg_ops->debug_excp_handler) {
        tcg_ops->debug_excp_handler(cpu);
    }
}

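/*
 * Handle a pending exception: returns true, with *ret set, when the
 * execution loop should stop and return to its caller; returns false to
 * continue executing TBs.
 */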
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    }

#if defined(CONFIG_USER_ONLY)
    /*
     * If user mode only, we simulate a fake exception which will be
     * handled outside the cpu execution loop.
     */
#if defined(TARGET_I386)
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
    tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
    *ret = cpu->exception_index;
    cpu->exception_index = -1;
    return true;
#else
    if (replay_exception()) {
        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

        bql_lock();
        tcg_ops->do_interrupt(cpu);
        bql_unlock();
        cpu->exception_index = -1;

        if (unlikely(cpu->singlestep_enabled)) {
            /*
             * After processing the exception, ensure an EXCP_DEBUG is
             * raised when single-stepping so that GDB doesn't miss the
             * next instruction.
             */
            *ret = EXCP_DEBUG;
            cpu_handle_debug_exception(cpu);
            return true;
        }
    } else if (!replay_has_interrupt()) {
        /* give a chance to iothread in replay mode */
        *ret = EXCP_INTERRUPT;
        return true;
    }
#endif

    return false;
}

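/*
 * True when icount is active and the vCPU has exhausted its instruction
 * budget, so execution must return to the main loop to replenish it.
 */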
static inline bool icount_exit_request(CPUState *cpu)
{
    if (!icount_enabled()) {
        return false;
    }
    if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
        return false;
    }
    return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
}

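/*
 * Check for pending interrupts and exit requests between TBs.  Returns
 * true when TB execution must stop because an exception or exit request
 * is now pending, false to keep going; *last_tb is cleared whenever
 * chaining from the previous TB is no longer valid.
 */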
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        bql_lock();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            bql_unlock();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            bql_unlock();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            bql_unlock();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            bql_unlock();
            return true;
        }
#endif /* !TARGET_I386 */
        /*
         * The target hook has 3 exit conditions:
         * False when the interrupt isn't processed,
         * True when it is, and we should restart on a new TB,
         * and a third where it exits via longjmp through cpu_loop_exit.
         */
| 854 | else { |
Richard Henderson | 991bd65 | 2024-01-28 12:57:59 +1000 | [diff] [blame] | 855 | const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; |
Philippe Mathieu-Daudé | 77c0fc4 | 2021-09-11 18:54:33 +0200 | [diff] [blame] | 856 | |
Peter Maydell | de68028 | 2024-07-12 12:39:49 +0100 | [diff] [blame] | 857 | if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { |
Richard Henderson | b7e9a4a | 2024-01-28 13:12:54 +1000 | [diff] [blame] | 858 | if (!tcg_ops->need_replay_interrupt || |
| 859 | tcg_ops->need_replay_interrupt(interrupt_request)) { |
Pavel Dovgalyuk | 4084893 | 2020-10-03 20:12:51 +0300 | [diff] [blame] | 860 | replay_interrupt(); |
| 861 | } |
Richard Henderson | ba3c35d | 2020-07-17 09:26:59 -0700 | [diff] [blame] | 862 | /* |
| 863 | * After processing the interrupt, ensure an EXCP_DEBUG is |
| 864 | * raised when single-stepping so that GDB doesn't miss the |
| 865 | * next instruction. |
| 866 | */ |
Luc Michel | 5b7b197 | 2022-02-24 14:52:42 -1000 | [diff] [blame] | 867 | if (unlikely(cpu->singlestep_enabled)) { |
| 868 | cpu->exception_index = EXCP_DEBUG; |
Stefan Hajnoczi | 195801d | 2024-01-02 10:35:25 -0500 | [diff] [blame] | 869 | bql_unlock(); |
Luc Michel | 5b7b197 | 2022-02-24 14:52:42 -1000 | [diff] [blame] | 870 | return true; |
| 871 | } |
| 872 | cpu->exception_index = -1; |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 873 | *last_tb = NULL; |
| 874 | } |
Sergey Fedorov | 8b1fe3f | 2016-05-12 19:52:17 +0300 | [diff] [blame] | 875 | /* The target hook may have updated the 'cpu->interrupt_request'; |
| 876 | * reload the 'interrupt_request' value */ |
| 877 | interrupt_request = cpu->interrupt_request; |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 878 | } |
Philippe Mathieu-Daudé | 77c0fc4 | 2021-09-11 18:54:33 +0200 | [diff] [blame] | 879 | #endif /* !CONFIG_USER_ONLY */ |
Sergey Fedorov | 8b1fe3f | 2016-05-12 19:52:17 +0300 | [diff] [blame] | 880 | if (interrupt_request & CPU_INTERRUPT_EXITTB) { |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 881 | cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; |
| 882 | /* ensure that no TB jump will be modified as |
| 883 | the program flow was changed */ |
| 884 | *last_tb = NULL; |
| 885 | } |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 886 | |
| 887 | /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ |
Stefan Hajnoczi | 195801d | 2024-01-02 10:35:25 -0500 | [diff] [blame] | 888 | bql_unlock(); |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 889 | } |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 890 | |
Pavel Dovgalyuk | cfb2d02 | 2017-02-07 09:54:57 +0300 | [diff] [blame] | 891 | /* Finally, check if we need to exit to the main loop. */ |
Philippe Mathieu-Daudé | 93c6091 | 2024-01-24 11:16:34 +0100 | [diff] [blame] | 892 | if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) { |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 893 | qatomic_set(&cpu->exit_request, 0); |
Pavel Dovgalyuk | 5f3bdfd | 2018-02-27 12:51:41 +0300 | [diff] [blame] | 894 | if (cpu->exception_index == -1) { |
| 895 | cpu->exception_index = EXCP_INTERRUPT; |
| 896 | } |
Paolo Bonzini | 209b71b | 2017-01-27 10:57:18 +0100 | [diff] [blame] | 897 | return true; |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 898 | } |
Paolo Bonzini | 209b71b | 2017-01-27 10:57:18 +0100 | [diff] [blame] | 899 | |
| 900 | return false; |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 901 | } |
| 902 | |
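/*
 * A minimal sketch (not part of this file) of the cpu_exec_interrupt
 * contract described above.  "foo" is a hypothetical target;
 * foo_cpu_do_interrupt() and the vector value are stand-ins.  Real
 * hooks live under target/<arch>/ and are registered via TCGCPUOps.
 */
static bool foo_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = 1;      /* hypothetical vector number */
        foo_cpu_do_interrupt(cs);     /* deliver; may not return if it
                                         calls cpu_loop_exit()         */
        return true;                  /* processed: restart on a new TB */
    }
    return false;                     /* not processed */
}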
Sergey Fedorov | 928de9e | 2016-05-11 13:21:50 +0300 | [diff] [blame] | 903 | static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, |
Anton Johansson | f0a08b0 | 2023-06-21 15:56:27 +0200 | [diff] [blame] | 904 | vaddr pc, TranslationBlock **last_tb, |
| 905 | int *tb_exit) |
Sergey Fedorov | 928de9e | 2016-05-11 13:21:50 +0300 | [diff] [blame] | 906 | { |
Richard Henderson | fbf59aa | 2022-08-15 15:16:06 -0500 | [diff] [blame] | 907 | trace_exec_tb(tb, pc); |
Richard Henderson | eba4035 | 2020-10-29 13:18:12 -0700 | [diff] [blame] | 908 | tb = cpu_tb_exec(cpu, tb, tb_exit); |
Paolo Bonzini | 1aab16c | 2017-01-27 11:25:33 +0100 | [diff] [blame] | 909 | if (*tb_exit != TB_EXIT_REQUESTED) { |
| 910 | *last_tb = tb; |
| 911 | return; |
| 912 | } |
| 913 | |
| 914 | *last_tb = NULL; |
Philippe Mathieu-Daudé | 0650fc1 | 2024-04-24 11:25:52 +0200 | [diff] [blame] | 915 | if (cpu_loop_exit_requested(cpu)) { |
Alex Bennée | e5143e3 | 2017-02-23 18:29:12 +0000 | [diff] [blame] | 916 | /* Something asked us to stop executing chained TBs; just
 | 917 | * continue around the main loop. Whatever requested the exit
Paolo Bonzini | 30f3dda | 2017-03-03 16:39:18 +0100 | [diff] [blame] | 918 | * will also have set something else (e.g. exit_request or
Pavel Dovgalyuk | 17b50b0 | 2017-11-14 11:18:18 +0300 | [diff] [blame] | 919 | * interrupt_request) which will be handled by
 | 920 | * cpu_handle_interrupt. cpu_handle_interrupt will also
 | 921 | * clear cpu->neg.icount_decr.u16.high.
Sergey Fedorov | 928de9e | 2016-05-11 13:21:50 +0300 | [diff] [blame] | 922 | */ |
Paolo Bonzini | 1aab16c | 2017-01-27 11:25:33 +0100 | [diff] [blame] | 923 | return; |
| 924 | } |
| 925 | |
| 926 | /* Instruction counter expired. */ |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 927 | assert(icount_enabled()); |
Paolo Bonzini | 1aab16c | 2017-01-27 11:25:33 +0100 | [diff] [blame] | 928 | #ifndef CONFIG_USER_ONLY |
Alex Bennée | eda5f7c | 2017-04-05 12:35:48 +0100 | [diff] [blame] | 929 | /* Ensure global icount has gone forward */ |
Claudio Fontana | 8191d36 | 2020-08-31 16:18:34 +0200 | [diff] [blame] | 930 | icount_update(cpu); |
Alex Bennée | eda5f7c | 2017-04-05 12:35:48 +0100 | [diff] [blame] | 931 | /* Refill decrementer and continue execution. */ |
Philippe Mathieu-Daudé | 0650fc1 | 2024-04-24 11:25:52 +0200 | [diff] [blame] | 932 | int32_t insns_left = MIN(0xffff, cpu->icount_budget); |
Richard Henderson | a953b5f | 2023-09-13 15:46:45 -0700 | [diff] [blame] | 933 | cpu->neg.icount_decr.u16.low = insns_left; |
Alex Bennée | eda5f7c | 2017-04-05 12:35:48 +0100 | [diff] [blame] | 934 | cpu->icount_extra = cpu->icount_budget - insns_left; |
Alex Bennée | bc662a3 | 2021-02-13 13:03:18 +0000 | [diff] [blame] | 935 | |
| 936 | /* |
| 937 | * If the next tb has more instructions than we have left to |
 | 938 | * execute, we need to ensure we find/generate a TB with exactly
| 939 | * insns_left instructions in it. |
| 940 | */ |
Peter Maydell | c8cf47a | 2021-07-25 18:44:05 +0100 | [diff] [blame] | 941 | if (insns_left > 0 && insns_left < tb->icount) { |
| 942 | assert(insns_left <= CF_COUNT_MASK); |
| 943 | assert(cpu->icount_extra == 0); |
Alex Bennée | bc662a3 | 2021-02-13 13:03:18 +0000 | [diff] [blame] | 944 | cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left; |
Paolo Bonzini | 1aab16c | 2017-01-27 11:25:33 +0100 | [diff] [blame] | 945 | } |
Sergey Fedorov | 928de9e | 2016-05-11 13:21:50 +0300 | [diff] [blame] | 946 | #endif |
Sergey Fedorov | 928de9e | 2016-05-11 13:21:50 +0300 | [diff] [blame] | 947 | } |
| 948 | |
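/*
 * Standalone sketch of the icount budget split performed in
 * cpu_loop_exec_tb() above: at most 0xffff instructions fit in the
 * 16-bit decrementer, the remainder is parked in icount_extra.
 * Plain C with illustrative values; not QEMU code.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t icount_budget = 100000;   /* instructions we may still execute */
    int32_t insns_left = icount_budget < 0xffff ? (int32_t)icount_budget
                                                : 0xffff;
    uint64_t icount_extra = icount_budget - (uint64_t)insns_left;

    /* Prints: decrementer=65535 extra=34465 */
    printf("decrementer=%" PRId32 " extra=%" PRIu64 "\n",
           insns_left, icount_extra);
    return 0;
}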
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 949 | /* main execution loop */ |
| 950 | |
Richard Henderson | 61710a7 | 2023-01-07 10:12:51 -0800 | [diff] [blame] | 951 | static int __attribute__((noinline)) |
| 952 | cpu_exec_loop(CPUState *cpu, SyncClocks *sc) |
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 953 | { |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 954 | int ret; |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 955 | |
| 956 | /* if an exception is pending, we execute it here */ |
| 957 | while (!cpu_handle_exception(cpu, &ret)) { |
| 958 | TranslationBlock *last_tb = NULL; |
| 959 | int tb_exit = 0; |
| 960 | |
| 961 | while (!cpu_handle_interrupt(cpu, &last_tb)) { |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 962 | TranslationBlock *tb; |
Anton Johansson | bb5de52 | 2023-06-21 15:56:24 +0200 | [diff] [blame] | 963 | vaddr pc; |
| 964 | uint64_t cs_base; |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 965 | uint32_t flags, cflags; |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 966 | |
Richard Henderson | b77af26 | 2023-09-13 17:22:49 -0700 | [diff] [blame] | 967 | cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags); |
Richard Henderson | 10c3782 | 2021-07-19 09:03:21 -1000 | [diff] [blame] | 968 | |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 969 | /* |
| 970 | * When requested, use an exact setting for cflags for the next |
| 971 | * execution. This is used for icount, precise smc, and stop- |
| 972 | * after-access watchpoints. Since this request should never |
| 973 | * have CF_INVALID set, -1 is a convenient invalid value that |
| 974 | * does not require tcg headers for cpu_common_reset. |
| 975 | */ |
| 976 | cflags = cpu->cflags_next_tb; |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 977 | if (cflags == -1) { |
Alex Bennée | c0ae396 | 2021-02-24 16:58:08 +0000 | [diff] [blame] | 978 | cflags = curr_cflags(cpu); |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 979 | } else { |
| 980 | cpu->cflags_next_tb = -1; |
| 981 | } |
| 982 | |
Richard Henderson | 10c3782 | 2021-07-19 09:03:21 -1000 | [diff] [blame] | 983 | if (check_for_breakpoints(cpu, pc, &cflags)) { |
| 984 | break; |
| 985 | } |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 986 | |
| 987 | tb = tb_lookup(cpu, pc, cs_base, flags, cflags); |
| 988 | if (tb == NULL) { |
Richard Henderson | 3371802 | 2023-03-31 18:52:33 -0700 | [diff] [blame] | 989 | CPUJumpCache *jc; |
Richard Henderson | a976a99 | 2022-08-15 15:13:05 -0500 | [diff] [blame] | 990 | uint32_t h; |
| 991 | |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 992 | mmap_lock(); |
| 993 | tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); |
| 994 | mmap_unlock(); |
Richard Henderson | 3371802 | 2023-03-31 18:52:33 -0700 | [diff] [blame] | 995 | |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 996 | /* |
 | 997 | * Add the TB to the virtual-PC hash table
 | 998 | * for fast lookup.
| 999 | */ |
Richard Henderson | a976a99 | 2022-08-15 15:13:05 -0500 | [diff] [blame] | 1000 | h = tb_jmp_cache_hash_func(pc); |
Richard Henderson | 3371802 | 2023-03-31 18:52:33 -0700 | [diff] [blame] | 1001 | jc = cpu->tb_jmp_cache; |
Paolo Bonzini | d157e54 | 2024-01-22 16:34:09 +0100 | [diff] [blame] | 1002 | jc->array[h].pc = pc; |
| 1003 | qatomic_set(&jc->array[h].tb, tb); |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 1004 | } |
| 1005 | |
| 1006 | #ifndef CONFIG_USER_ONLY |
| 1007 | /* |
| 1008 | * We don't take care of direct jumps when address mapping |
| 1009 | * changes in system emulation. So it's not safe to make a |
| 1010 | * direct jump to a TB spanning two pages because the mapping |
| 1011 | * for the second page can change. |
| 1012 | */ |
Richard Henderson | 28905cf | 2022-09-20 13:21:40 +0200 | [diff] [blame] | 1013 | if (tb_page_addr1(tb) != -1) { |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 1014 | last_tb = NULL; |
| 1015 | } |
| 1016 | #endif |
| 1017 | /* See if we can patch the calling TB. */ |
| 1018 | if (last_tb) { |
| 1019 | tb_add_jump(last_tb, tb_exit, tb); |
| 1020 | } |
| 1021 | |
Richard Henderson | fbf59aa | 2022-08-15 15:16:06 -0500 | [diff] [blame] | 1022 | cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 1023 | |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 1024 | /* Try to align the host and virtual clocks
 | 1025 | if the guest is running ahead of the host */
Richard Henderson | 61710a7 | 2023-01-07 10:12:51 -0800 | [diff] [blame] | 1026 | align_clocks(sc, cpu); |
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 1027 | } |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 1028 | } |
Richard Henderson | 61710a7 | 2023-01-07 10:12:51 -0800 | [diff] [blame] | 1029 | return ret; |
| 1030 | } |
| 1031 | |
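/*
 * Standalone model of the tb_jmp_cache fill/lookup done in
 * cpu_exec_loop() above: a direct-mapped cache indexed by a hash of the
 * virtual PC, consulted before the slower tb_lookup() path.  The hash
 * function, table size and struct layout here are simplified
 * assumptions, not QEMU's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define JC_SIZE 4096                       /* power of two */

struct jc_entry { uint64_t pc; void *tb; };
static struct jc_entry jc_array[JC_SIZE];

static uint32_t jc_hash(uint64_t pc)
{
    return (uint32_t)(pc >> 2) & (JC_SIZE - 1);
}

static void *jc_lookup(uint64_t pc)
{
    struct jc_entry *e = &jc_array[jc_hash(pc)];
    return e->pc == pc ? e->tb : NULL;     /* NULL -> fall back to slow path */
}

static void jc_fill(uint64_t pc, void *tb)
{
    struct jc_entry *e = &jc_array[jc_hash(pc)];
    e->pc = pc;
    e->tb = tb;
}

int main(void)
{
    int fake_tb;
    jc_fill(0x400080, &fake_tb);
    printf("0x400080 cached: %s\n", jc_lookup(0x400080) ? "yes" : "no");
    printf("0x400084 cached: %s\n", jc_lookup(0x400084) ? "yes" : "no");
    return 0;
}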
| 1032 | static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc) |
| 1033 | { |
| 1034 | /* Prepare setjmp context for exception handling. */ |
| 1035 | if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) { |
Richard Henderson | cb62bd1 | 2023-07-06 08:45:13 +0100 | [diff] [blame] | 1036 | cpu_exec_longjmp_cleanup(cpu); |
Richard Henderson | 61710a7 | 2023-01-07 10:12:51 -0800 | [diff] [blame] | 1037 | } |
| 1038 | |
| 1039 | return cpu_exec_loop(cpu, sc); |
| 1040 | } |
| 1041 | |
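/*
 * Standalone sketch of the sigsetjmp/siglongjmp pattern that
 * cpu_exec_setjmp() above relies on: in QEMU, cpu_loop_exit() performs
 * the siglongjmp back to cpu->jmp_env.  Plain C analogue; the names
 * below are illustrative only.
 */
#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf jmp_env;

static void fake_loop_exit(void)
{
    siglongjmp(jmp_env, 1);            /* unwind back to the sigsetjmp site */
}

int main(void)
{
    if (sigsetjmp(jmp_env, 0) != 0) {
        /* Control comes back here after fake_loop_exit(). */
        printf("longjmp taken: clean up and continue the outer loop\n");
        return 0;
    }
    printf("executing guest code...\n");
    fake_loop_exit();                  /* does not return */
    return 1;
}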
| 1042 | int cpu_exec(CPUState *cpu) |
| 1043 | { |
| 1044 | int ret; |
| 1045 | SyncClocks sc = { 0 }; |
| 1046 | |
| 1047 | /* replay_interrupt may need current_cpu */ |
| 1048 | current_cpu = cpu; |
| 1049 | |
| 1050 | if (cpu_handle_halt(cpu)) { |
| 1051 | return EXCP_HALTED; |
| 1052 | } |
| 1053 | |
Philippe Mathieu-Daudé | f5e9362 | 2024-01-24 08:41:56 +0100 | [diff] [blame] | 1054 | RCU_READ_LOCK_GUARD(); |
Richard Henderson | 61710a7 | 2023-01-07 10:12:51 -0800 | [diff] [blame] | 1055 | cpu_exec_enter(cpu); |
| 1056 | |
| 1057 | /* |
 | 1058 | * Calculate the difference between the guest and host clocks.
 | 1059 | * This delay includes the delay of the last cycle, so
 | 1060 | * all we have to do is sleep until it reaches 0. Any
 | 1061 | * advance or delay we gain here, we try to correct next time.
| 1062 | */ |
| 1063 | init_delay_params(&sc, cpu); |
| 1064 | |
| 1065 | ret = cpu_exec_setjmp(cpu, &sc); |
bellard | 3fb2ded | 2003-06-24 13:22:59 +0000 | [diff] [blame] | 1066 | |
Eduardo Habkost | 035ba06 | 2020-12-12 16:55:16 +0100 | [diff] [blame] | 1067 | cpu_exec_exit(cpu); |
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 1068 | return ret; |
| 1069 | } |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1070 | |
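/*
 * Hedged sketch of how a caller might consume cpu_exec()'s return
 * value; the real dispatch lives in the TCG accelerator's vCPU thread
 * loop, and should_run()/handle_debug() below are hypothetical helpers
 * used only for illustration.
 */
static void vcpu_loop_sketch(CPUState *cpu)
{
    while (should_run(cpu)) {
        int r = cpu_exec(cpu);

        switch (r) {
        case EXCP_DEBUG:
            handle_debug(cpu);         /* e.g. report a stop to the gdbstub */
            break;
        case EXCP_HALTED:
        case EXCP_INTERRUPT:
        default:
            break;                     /* wait for more work, then retry */
        }
    }
}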
Philippe Mathieu-Daudé | fa312f2 | 2023-10-03 14:30:24 +0200 | [diff] [blame] | 1071 | bool tcg_exec_realizefn(CPUState *cpu, Error **errp) |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1072 | { |
| 1073 | static bool tcg_target_initialized; |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1074 | |
| 1075 | if (!tcg_target_initialized) { |
Peter Maydell | 0487c63 | 2024-07-04 16:57:10 +0100 | [diff] [blame] | 1076 | /* Check mandatory TCGCPUOps handlers */ |
| 1077 | #ifndef CONFIG_USER_ONLY |
| 1078 | assert(cpu->cc->tcg_ops->cpu_exec_halt); |
Peter Maydell | de68028 | 2024-07-12 12:39:49 +0100 | [diff] [blame] | 1079 | assert(cpu->cc->tcg_ops->cpu_exec_interrupt); |
Peter Maydell | 0487c63 | 2024-07-04 16:57:10 +0100 | [diff] [blame] | 1080 | #endif /* !CONFIG_USER_ONLY */ |
Richard Henderson | 991bd65 | 2024-01-28 12:57:59 +1000 | [diff] [blame] | 1081 | cpu->cc->tcg_ops->initialize(); |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1082 | tcg_target_initialized = true; |
| 1083 | } |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1084 | |
Richard Henderson | 4e4fa6c | 2022-10-31 13:26:36 +1100 | [diff] [blame] | 1085 | cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1); |
| 1086 | tlb_init(cpu); |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1087 | #ifndef CONFIG_USER_ONLY |
| 1088 | tcg_iommu_init_notifier_list(cpu); |
| 1089 | #endif /* !CONFIG_USER_ONLY */ |
Richard Henderson | 4e4fa6c | 2022-10-31 13:26:36 +1100 | [diff] [blame] | 1090 | /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */ |
Philippe Mathieu-Daudé | fa312f2 | 2023-10-03 14:30:24 +0200 | [diff] [blame] | 1091 | |
| 1092 | return true; |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1093 | } |
| 1094 | |
 | 1095 | /* Undo the initializations from tcg_exec_realizefn(), in reverse order. */
| 1096 | void tcg_exec_unrealizefn(CPUState *cpu) |
| 1097 | { |
| 1098 | #ifndef CONFIG_USER_ONLY |
| 1099 | tcg_iommu_free_notifier_list(cpu); |
| 1100 | #endif /* !CONFIG_USER_ONLY */ |
| 1101 | |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1102 | tlb_destroy(cpu); |
Emilio Cota | 4731f89 | 2023-01-24 18:01:18 +0000 | [diff] [blame] | 1103 | g_free_rcu(cpu->tb_jmp_cache, rcu); |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1104 | } |