/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
#include "perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
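
/*
 * Worked example (added for illustration, values hypothetical):
 * the encoder emits 7 value bits per byte, least significant first,
 * with bit 0x80 as the continuation flag and bit 0x40 of the final
 * byte carrying the sign.  So:
 *     300 -> 0xac 0x02    (two bytes: 44 | 0x80, then 2)
 *     -64 -> 0x40         (one byte: sign bit set, no continuation)
 * Small deltas thus cost a single byte, which is what keeps the
 * delta-encoded search table below compact.
 */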

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}
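
/*
 * Round-trip sketch (added, not part of the original file):
 *     uint8_t buf[10], *end = encode_sleb128(buf, -12345);
 *     const uint8_t *p = buf;
 *     assert(decode_sleb128(&p) == -12345 && p == end);
 * Note the sign extension above is capped at TARGET_LONG_BITS, which
 * is sufficient for the guest-address and host-offset deltas stored
 * in the search table.
 */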

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
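
/*
 * Layout sketch (added; hypothetical values, TARGET_INSN_START_WORDS == 2):
 *
 *        guest pc     word[1]   host end offset
 *      0x40001000           0              0x10
 *      0x40001004           0              0x24
 *
 * With the guest-pc column seeded from tb->pc and the host column stored
 * as offsets from tb->tc.ptr, the encoded stream is the per-column sleb128
 * deltas, one byte each here:
 *      row 0:  0x00 0x00 0x10
 *      row 1:  0x04 0x00 0x14
 */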

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding. */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}
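
/*
 * Typical use (added sketch): a helper about to raise an exception
 * resolves the guest state from its own return address, e.g.
 *     cpu_restore_state(cpu, GETPC());
 * which succeeds only when that address lies within the generated-code
 * buffer, per the cases above.
 */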

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_size_init();
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}
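
/*
 * Note (added): errors escape code generation either as a negative
 * return from tcg_gen_code() or as a nonzero value delivered through
 * siglongjmp() into the sigsetjmp() above; either way the caller sees
 * a negative result, decoded by the switch in tb_gen_code() below.
 */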

/* Called with mmap_lock held for user mode emulation. */
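/*
 * Overview (added): allocate a TB and a slice of code_gen_buffer,
 * translate the guest code at 'pc' (retrying on buffer overflow,
 * oversized output, or page-lock ordering failures), append the
 * encoded search table, then publish the TB via tb_link_page();
 * if an identical TB won the race, discard ours and return the
 * existing one.
 */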
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(cflags & CF_PCREL)) {
        tb->pc = pc;
    }
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
    tcg_ctx->guest_mo = TCG_MO_ALL;
#endif

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction.  The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB, and we have nothing left to do. Return early
     * before attempting to link to other TBs or add to the lookup table.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise a rewind happening inside the TB might
     * fail to look itself up using the host PC.
     */
    tcg_tb_insert(tb);

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}
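
/*
 * Call-site sketch (added; mirrors the lookup-miss path in cpu-exec.c):
 *
 *     mmap_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * with pc, cs_base and flags obtained from cpu_get_tb_cpu_state().
 */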

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there.  */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
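    /*
     * Note (added): 'n' (1 or 2) lands in the CF_COUNT_MASK field of
     * cflags, so the regenerated TB is limited to that many insns.
     */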
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = log_pc(cpu, tb);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}