/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
#include "perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value. Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}
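
/*
 * Worked example (illustrative only, not part of any contract):
 * encode_sleb128() and decode_sleb128() round-trip standard signed
 * LEB128 values, e.g.
 *
 *     uint8_t buf[16], *end;
 *     const uint8_t *cur = buf;
 *     end = encode_sleb128(buf, 300);    // emits 0xac 0x02
 *     end = encode_sleb128(end, -2);     // emits 0x7e
 *     assert(decode_sleb128(&cur) == 300);
 *     assert(decode_sleb128(&cur) == -2);
 *
 * Small deltas between successive rows of the search table below
 * therefore usually cost a single byte each.
 */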

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros. */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow. The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely. Thus we can test for overflow after
           encoding a row without having to check during encoding. */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
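
/*
 * Illustrative layout (hypothetical numbers, non-CF_PCREL case): for a
 * two-insn TB at guest pc 0x1000 (second insn at 0x1004), with the host
 * code for the two insns ending at offsets 24 and 40 from tb->tc.ptr,
 * and assuming TARGET_INSN_START_WORDS == 2 with the extra word unused,
 * the encoded stream is the sleb128 deltas
 *
 *     row 0:  0 (0x1000 - tb->pc), 0, 24 (end offset)
 *     row 1:  4 (0x1004 - 0x1000), 0, 16 (40 - 24)
 *
 * i.e. six single-byte values for the whole table.
 */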

static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}
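
/*
 * Illustrative use (hypothetical helper, not defined in this file):
 * a target helper that needs a precise guest PC can unwind via
 *
 *     void helper_foo(CPUArchState *env)
 *     {
 *         cpu_restore_state(env_cpu(env), GETPC());
 *         ...
 *     }
 *
 * GETPC() captures the host return address within the generated code,
 * which cpu_restore_state() maps back to guest CPU state using the
 * search data encoded after each TB.
 */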

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_size_init();
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}

/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(cflags & CF_PCREL)) {
        tb->pc = pc;
    }
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
    tcg_ctx->guest_mo = TCG_MO_ALL;
#endif

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code. All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem. In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    " -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction. The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, " -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, " data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB, and we have nothing left to do. Return early
     * before attempting to link to other TBs or add to the lookup table.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT. Otherwise, a rewind that happens in the TB might fail
     * to look the TB up by host PC.
     */
    tcg_tb_insert(tb);

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}
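
/*
 * Typical call path (sketch): the cpu_exec() slow path translates a new
 * block when tb_lookup() misses, roughly
 *
 *     mmap_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * For system emulation mmap_lock()/mmap_unlock() are no-ops; in user mode
 * they provide the memory lock that assert_memory_lock() above checks.
 */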

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction. When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing
     * just the I/O insns. We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = log_pc(cpu, tb);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}