/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
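
/* Worked example of the macros above, with illustrative parameters that are
 * not taken from any particular target: for a user-mode configuration with
 * TARGET_VIRT_ADDR_SPACE_BITS == 47 and TARGET_PAGE_BITS == 12 there are
 * 47 - 12 = 35 page-index bits to cover.  V_L1_BITS_REM = 35 % 10 = 5,
 * which is >= 4, so V_L1_BITS = 5, V_L1_SIZE = 32 and V_L1_SHIFT = 30:
 * a 32-entry top level followed by 30 bits' worth of 1024-entry levels.
 */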

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
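
/* For illustration (values chosen for the example, not produced by the code
 * above): encode_sleb128(p, 300) emits the two bytes 0xac 0x02, since the
 * low seven bits 0x2c need a continuation marker, while encode_sleb128(p, -2)
 * emits the single byte 0x7e, the sign bit 0x40 already being set.  The
 * caller below (encode_search) only ever encodes row-to-row deltas, so most
 * values fit in a single byte.
 */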

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
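
/* Illustrative example of the resulting table (the numbers are invented for
 * the example, not taken from a real translation): for a target with
 * TARGET_INSN_START_WORDS == 1 and a TB containing two guest instructions,
 * where the second instruction starts at tb->pc + 2 and the generated host
 * code for the two instructions ends at offsets 40 and 76 respectively, the
 * two rows are { +0, +40 } and { +2, +36 } and encode_search() emits the
 * four bytes 0x00 0x28 0x02 0x24.
 */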

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
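
/* As an aside on the mask computation above: with a 4096-byte page (an
 * example value; the real size comes from getpagesize()), -(intptr_t)4096
 * is ~0xfff, so "addr & qemu_host_page_mask" rounds an address down to the
 * start of its host page.
 */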

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
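
/* Continuing the illustrative geometry sketched near the V_L1 macros
 * (V_L1_SHIFT == 30, V_L2_BITS == 10): bits [34:30] of the page index
 * select the l1_map slot, the loop then walks two 1024-entry pointer tables
 * using bits [29:20] and [19:10], and bits [9:0] finally index the leaf
 * array of PageDesc structures.  Missing levels are only allocated when
 * alloc != 0.
 */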

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
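
/* Worked example with made-up addresses: a buffer at 0x0ff00000 of size
 * 0x00200000 ends at 0x10100000; the XOR of base and end is 0x1fe00000 and
 * masking with ~0x0fffffff leaves 0x10000000, so the range crosses a 256MB
 * boundary.  A buffer at 0x10000000 of size 0x01000000 XORs to 0x01000000,
 * which the mask clears, so it fits within a single 256MB region.
 */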

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
    if (!tcg_enabled()) {
        return;
    }
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
        atomic_mb_set(&cpu->tb_flushed, true);
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
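
/* A note on the encoding consumed by the two helpers above (sketched from
 * how the lists are read here; the links themselves are created when TBs
 * are chained together at execution time, outside this file): each word in
 * jmp_list_next[] / jmp_list_first stores a TranslationBlock pointer with a
 * tag in its low two bits.  Tags 0 and 1 name the jump slot of the TB that
 * owns the next link, while tag 2 marks the destination TB itself and
 * therefore the end of the chain.  For example, if TB A (slot 0) and TB B
 * (slot 1) are both chained to TB C, C->jmp_list_first walks
 * B|1 -> A|0 -> C|2.
 */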

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
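
/* Worked example for build_page_bitmap() with made-up numbers: with a
 * 4096-byte target page, a TB whose code starts at page offset 0xfd0 and is
 * 0x50 bytes long sets bits [0xfd0, 0x1000) in the bitmap of its first page
 * (the end is clamped to TARGET_PAGE_SIZE) and bits [0, 0x20) in the bitmap
 * of the following page, via its n == 1 entry on that page's TB list.
 */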

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which were set during tcg_gen_code() */
1263 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1264 tb_reset_jump(tb, 0);
1265 }
1266 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1267 tb_reset_jump(tb, 1);
1268 }
1269
Blue Swirl5b6dd862012-12-02 16:04:43 +00001270 /* check next page if needed */
1271 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1272 phys_page2 = -1;
1273 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1274 phys_page2 = get_page_addr_code(env, virt_page2);
1275 }
Sergey Fedorov901bc3d2016-03-22 19:00:12 +03001276 /* As long as consistency of the TB stuff is provided by tb_lock in user
1277 * mode and is implicit in single-threaded softmmu emulation, no explicit
1278 * memory barrier is required before tb_link_page() makes the TB visible
1279 * through the physical hash table and physical page list.
1280 */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001281 tb_link_page(tb, phys_pc, phys_page2);
1282 return tb;
1283}
1284
1285/*
1286 * Invalidate all TBs which intersect with the target physical address range
1287 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1288 * 'is_cpu_write_access' should be true if called from a real cpu write
1289 * access: the virtual CPU will exit the current TB if code is modified inside
1290 * this TB.
Paolo Bonzini75692082015-08-11 10:59:50 +02001291 *
1292 * Called with mmap_lock held for user-mode emulation
Blue Swirl5b6dd862012-12-02 16:04:43 +00001293 */
Paolo Bonzini35865332015-04-22 14:20:35 +02001294void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001295{
1296 while (start < end) {
Paolo Bonzini35865332015-04-22 14:20:35 +02001297 tb_invalidate_phys_page_range(start, end, 0);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001298 start &= TARGET_PAGE_MASK;
1299 start += TARGET_PAGE_SIZE;
1300 }
1301}
1302
1303/*
1304 * Invalidate all TBs which intersect with the target physical address range
1305 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1306 * 'is_cpu_write_access' should be true if called from a real cpu write
1307 * access: the virtual CPU will exit the current TB if code is modified inside
1308 * this TB.
Paolo Bonzini75692082015-08-11 10:59:50 +02001309 *
1310 * Called with mmap_lock held for user-mode emulation
Blue Swirl5b6dd862012-12-02 16:04:43 +00001311 */
1312void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1313 int is_cpu_write_access)
1314{
Sergey Fedorov32135252016-05-03 14:04:22 +03001315 TranslationBlock *tb, *tb_next;
Andreas Färberbaea4fa2013-09-03 10:51:26 +02001316#if defined(TARGET_HAS_PRECISE_SMC)
Sergey Fedorov32135252016-05-03 14:04:22 +03001317 CPUState *cpu = current_cpu;
Andreas Färber4917cf42013-05-27 05:17:50 +02001318 CPUArchState *env = NULL;
1319#endif
Blue Swirl5b6dd862012-12-02 16:04:43 +00001320 tb_page_addr_t tb_start, tb_end;
1321 PageDesc *p;
1322 int n;
1323#ifdef TARGET_HAS_PRECISE_SMC
1324 int current_tb_not_found = is_cpu_write_access;
1325 TranslationBlock *current_tb = NULL;
1326 int current_tb_modified = 0;
1327 target_ulong current_pc = 0;
1328 target_ulong current_cs_base = 0;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001329 uint32_t current_flags = 0;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001330#endif /* TARGET_HAS_PRECISE_SMC */
1331
1332 p = page_find(start >> TARGET_PAGE_BITS);
1333 if (!p) {
1334 return;
1335 }
Andreas Färberbaea4fa2013-09-03 10:51:26 +02001336#if defined(TARGET_HAS_PRECISE_SMC)
Andreas Färber4917cf42013-05-27 05:17:50 +02001337 if (cpu != NULL) {
1338 env = cpu->env_ptr;
Andreas Färberd77953b2013-01-16 19:29:31 +01001339 }
Andreas Färber4917cf42013-05-27 05:17:50 +02001340#endif
Blue Swirl5b6dd862012-12-02 16:04:43 +00001341
1342 /* we remove all the TBs in the range [start, end[ */
1343 /* XXX: see if in some cases it could be faster to invalidate all
1344 the code */
1345 tb = p->first_tb;
1346 while (tb != NULL) {
1347 n = (uintptr_t)tb & 3;
1348 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1349 tb_next = tb->page_next[n];
1350 /* NOTE: this is subtle as a TB may span two physical pages */
1351 if (n == 0) {
1352 /* NOTE: tb_end may be after the end of the page, but
1353 it is not a problem */
1354 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1355 tb_end = tb_start + tb->size;
1356 } else {
1357 tb_start = tb->page_addr[1];
1358 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1359 }
1360 if (!(tb_end <= start || tb_start >= end)) {
1361#ifdef TARGET_HAS_PRECISE_SMC
1362 if (current_tb_not_found) {
1363 current_tb_not_found = 0;
1364 current_tb = NULL;
Andreas Färber93afead2013-08-26 03:41:01 +02001365 if (cpu->mem_io_pc) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001366 /* now we have a real cpu fault */
Andreas Färber93afead2013-08-26 03:41:01 +02001367 current_tb = tb_find_pc(cpu->mem_io_pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001368 }
1369 }
1370 if (current_tb == tb &&
1371 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1372 /* If we are modifying the current TB, we must stop
1373 its execution. We could be more precise by checking
1374 that the modification is after the current PC, but it
1375 would require a specialized function to partially
1376 restore the CPU state */
1377
1378 current_tb_modified = 1;
Andreas Färber74f10512013-09-01 17:02:58 +02001379 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001380 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1381 &current_flags);
1382 }
1383#endif /* TARGET_HAS_PRECISE_SMC */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001384 tb_phys_invalidate(tb, -1);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001385 }
1386 tb = tb_next;
1387 }
1388#if !defined(CONFIG_USER_ONLY)
1389 /* if no code remaining, no need to continue to use slow writes */
1390 if (!p->first_tb) {
1391 invalidate_page_bitmap(p);
Paolo Bonzinifc377bc2015-04-22 14:20:35 +02001392 tlb_unprotect_code(start);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001393 }
1394#endif
1395#ifdef TARGET_HAS_PRECISE_SMC
1396 if (current_tb_modified) {
1397        /* we generate a new block containing just the instruction
1398           that modified the memory; this ensures the new TB cannot
1399           modify itself */
Andreas Färber648f0342013-09-01 17:43:17 +02001400 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01001401 cpu_loop_exit_noexc(cpu);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001402 }
1403#endif
1404}
1405
Paolo Bonzini6fad4592015-08-11 12:42:55 +02001406#ifdef CONFIG_SOFTMMU
Blue Swirl5b6dd862012-12-02 16:04:43 +00001407/* len must be <= 8 and start must be a multiple of len */
1408void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1409{
1410 PageDesc *p;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001411
1412#if 0
1413 if (1) {
1414 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1415 cpu_single_env->mem_io_vaddr, len,
1416 cpu_single_env->eip,
1417 cpu_single_env->eip +
1418 (intptr_t)cpu_single_env->segs[R_CS].base);
1419 }
1420#endif
1421 p = page_find(start >> TARGET_PAGE_BITS);
1422 if (!p) {
1423 return;
1424 }
Paolo Bonzinifc377bc2015-04-22 14:20:35 +02001425 if (!p->code_bitmap &&
1426 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1427 /* build code bitmap */
1428 build_page_bitmap(p);
1429 }
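    /* The code bitmap has one bit per byte of the page.  A write of 'len'
     * bytes at page offset 'nr' overlaps translated code iff any of bits
     * nr..nr+len-1 is set.  Worked example, assuming BITS_PER_LONG == 64:
     * a 4-byte write at offset 0x123 (nr = 291) reads word 291 / 64 = 4,
     * shifts it right by 291 & 63 = 35, and tests the low (1 << 4) - 1 = 0xf
     * bits, i.e. bits 35..38 covering byte offsets 291..294 of the page.
     */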
Blue Swirl5b6dd862012-12-02 16:04:43 +00001430 if (p->code_bitmap) {
Emilio G. Cota510a6472015-04-22 17:50:52 -04001431 unsigned int nr;
1432 unsigned long b;
1433
1434 nr = start & ~TARGET_PAGE_MASK;
1435 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
Blue Swirl5b6dd862012-12-02 16:04:43 +00001436 if (b & ((1 << len) - 1)) {
1437 goto do_invalidate;
1438 }
1439 } else {
1440 do_invalidate:
1441 tb_invalidate_phys_page_range(start, start + len, 1);
1442 }
1443}
Paolo Bonzini6fad4592015-08-11 12:42:55 +02001444#else
Peter Maydell75809222016-05-17 15:18:02 +01001445/* Called with mmap_lock held. If pc is not 0 then it indicates the
1446 * host PC of the faulting store instruction that caused this invalidate.
1447 * Returns true if the caller needs to abort execution of the current
1448 * TB (because it was modified by this store and the guest CPU has
1449 * precise-SMC semantics).
1450 */
1451static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001452{
1453 TranslationBlock *tb;
1454 PageDesc *p;
1455 int n;
1456#ifdef TARGET_HAS_PRECISE_SMC
1457 TranslationBlock *current_tb = NULL;
Andreas Färber4917cf42013-05-27 05:17:50 +02001458 CPUState *cpu = current_cpu;
1459 CPUArchState *env = NULL;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001460 int current_tb_modified = 0;
1461 target_ulong current_pc = 0;
1462 target_ulong current_cs_base = 0;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001463 uint32_t current_flags = 0;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001464#endif
1465
1466 addr &= TARGET_PAGE_MASK;
1467 p = page_find(addr >> TARGET_PAGE_BITS);
1468 if (!p) {
Peter Maydell75809222016-05-17 15:18:02 +01001469 return false;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001470 }
1471 tb = p->first_tb;
1472#ifdef TARGET_HAS_PRECISE_SMC
1473 if (tb && pc != 0) {
1474 current_tb = tb_find_pc(pc);
1475 }
Andreas Färber4917cf42013-05-27 05:17:50 +02001476 if (cpu != NULL) {
1477 env = cpu->env_ptr;
Andreas Färberd77953b2013-01-16 19:29:31 +01001478 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001479#endif
1480 while (tb != NULL) {
1481 n = (uintptr_t)tb & 3;
1482 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1483#ifdef TARGET_HAS_PRECISE_SMC
1484 if (current_tb == tb &&
1485 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1486 /* If we are modifying the current TB, we must stop
1487 its execution. We could be more precise by checking
1488 that the modification is after the current PC, but it
1489 would require a specialized function to partially
1490 restore the CPU state */
1491
1492 current_tb_modified = 1;
Andreas Färber74f10512013-09-01 17:02:58 +02001493 cpu_restore_state_from_tb(cpu, current_tb, pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001494 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1495 &current_flags);
1496 }
1497#endif /* TARGET_HAS_PRECISE_SMC */
1498 tb_phys_invalidate(tb, addr);
1499 tb = tb->page_next[n];
1500 }
1501 p->first_tb = NULL;
1502#ifdef TARGET_HAS_PRECISE_SMC
1503 if (current_tb_modified) {
1504        /* we generate a new block containing just the instruction
1505           that modified the memory; this ensures the new TB cannot
1506           modify itself */
Andreas Färber648f0342013-09-01 17:43:17 +02001507 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
Peter Maydell75809222016-05-17 15:18:02 +01001508 return true;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001509 }
1510#endif
Peter Maydell75809222016-05-17 15:18:02 +01001511 return false;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001512}
1513#endif
1514
Blue Swirl5b6dd862012-12-02 16:04:43 +00001515/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1516 tb[1].tc_ptr. Return NULL if not found */
Blue Swirla8a826a2012-12-04 20:16:07 +00001517static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001518{
1519 int m_min, m_max, m;
1520 uintptr_t v;
1521 TranslationBlock *tb;
1522
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001523 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001524 return NULL;
1525 }
Evgeny Voevodin0b0d3322013-02-01 01:47:22 +07001526 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1527 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001528 return NULL;
1529 }
1530 /* binary search (cf Knuth) */
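    /* TBs are allocated in order and their code is emitted linearly into
     * code_gen_buffer, so tbs[] is sorted by tc_ptr.  If tc_ptr does not match
     * a TB start exactly, the loop exits with m_max indexing the last TB whose
     * tc_ptr precedes tc_ptr, i.e. the TB that contains the address.
     */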
1531 m_min = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001532 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001533 while (m_min <= m_max) {
1534 m = (m_min + m_max) >> 1;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001535 tb = &tcg_ctx.tb_ctx.tbs[m];
Blue Swirl5b6dd862012-12-02 16:04:43 +00001536 v = (uintptr_t)tb->tc_ptr;
1537 if (v == tc_ptr) {
1538 return tb;
1539 } else if (tc_ptr < v) {
1540 m_max = m - 1;
1541 } else {
1542 m_min = m + 1;
1543 }
1544 }
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001545 return &tcg_ctx.tb_ctx.tbs[m_max];
Blue Swirl5b6dd862012-12-02 16:04:43 +00001546}
1547
Peter Maydellec53b452015-01-20 15:19:32 +00001548#if !defined(CONFIG_USER_ONLY)
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +01001549void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001550{
1551 ram_addr_t ram_addr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001552 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001553 hwaddr l = 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001554
Paolo Bonzini41063e12015-03-18 14:21:43 +01001555 rcu_read_lock();
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +01001556 mr = address_space_translate(as, addr, &addr, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001557 if (!(memory_region_is_ram(mr)
1558 || memory_region_is_romd(mr))) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01001559 rcu_read_unlock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001560 return;
1561 }
Paolo Bonzinie4e69792016-03-01 10:44:50 +01001562 ram_addr = memory_region_get_ram_addr(mr) + addr;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001563 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
Paolo Bonzini41063e12015-03-18 14:21:43 +01001564 rcu_read_unlock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001565}
Peter Maydellec53b452015-01-20 15:19:32 +00001566#endif /* !defined(CONFIG_USER_ONLY) */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001567
Andreas Färber239c51a2013-09-01 17:12:23 +02001568void tb_check_watchpoint(CPUState *cpu)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001569{
1570 TranslationBlock *tb;
1571
Andreas Färber93afead2013-08-26 03:41:01 +02001572 tb = tb_find_pc(cpu->mem_io_pc);
Aurelien Jarno8d302e72015-06-13 00:45:59 +02001573 if (tb) {
1574 /* We can use retranslation to find the PC. */
1575 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1576 tb_phys_invalidate(tb, -1);
1577 } else {
1578 /* The exception probably happened in a helper. The CPU state should
1579 have been saved before calling it. Fetch the PC from there. */
1580 CPUArchState *env = cpu->env_ptr;
1581 target_ulong pc, cs_base;
1582 tb_page_addr_t addr;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001583 uint32_t flags;
Aurelien Jarno8d302e72015-06-13 00:45:59 +02001584
1585 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1586 addr = get_page_addr_code(env, pc);
1587 tb_invalidate_phys_range(addr, addr + 1);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001588 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001589}
1590
1591#ifndef CONFIG_USER_ONLY
Blue Swirl5b6dd862012-12-02 16:04:43 +00001592/* in deterministic execution mode, instructions doing device I/Os
1593 must be at the end of the TB */
Andreas Färber90b40a62013-09-01 17:21:47 +02001594void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001595{
Andreas Färbera47dddd2013-09-03 17:38:47 +02001596#if defined(TARGET_MIPS) || defined(TARGET_SH4)
Andreas Färber90b40a62013-09-01 17:21:47 +02001597 CPUArchState *env = cpu->env_ptr;
Andreas Färbera47dddd2013-09-03 17:38:47 +02001598#endif
Blue Swirl5b6dd862012-12-02 16:04:43 +00001599 TranslationBlock *tb;
1600 uint32_t n, cflags;
1601 target_ulong pc, cs_base;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001602 uint32_t flags;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001603
1604 tb = tb_find_pc(retaddr);
1605 if (!tb) {
Andreas Färbera47dddd2013-09-03 17:38:47 +02001606 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl5b6dd862012-12-02 16:04:43 +00001607 (void *)retaddr);
1608 }
Andreas Färber28ecfd72013-08-26 05:51:49 +02001609 n = cpu->icount_decr.u16.low + tb->icount;
Andreas Färber74f10512013-09-01 17:02:58 +02001610 cpu_restore_state_from_tb(cpu, tb, retaddr);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001611 /* Calculate how many instructions had been executed before the fault
1612 occurred. */
Andreas Färber28ecfd72013-08-26 05:51:49 +02001613 n = n - cpu->icount_decr.u16.low;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001614 /* Generate a new TB ending on the I/O insn. */
1615 n++;
1616 /* On MIPS and SH, delay slot instructions can only be restarted if
1617 they were already the first instruction in the TB. If this is not
1618 the first instruction in a TB then re-execute the preceding
1619 branch. */
1620#if defined(TARGET_MIPS)
1621 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
Maciej W. Rozyckic3577472014-11-07 20:05:35 +00001622 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
Andreas Färber28ecfd72013-08-26 05:51:49 +02001623 cpu->icount_decr.u16.low++;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001624 env->hflags &= ~MIPS_HFLAG_BMASK;
1625 }
1626#elif defined(TARGET_SH4)
1627 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1628 && n > 1) {
1629 env->pc -= 2;
Andreas Färber28ecfd72013-08-26 05:51:49 +02001630 cpu->icount_decr.u16.low++;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001631 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1632 }
1633#endif
1634 /* This should never happen. */
1635 if (n > CF_COUNT_MASK) {
Andreas Färbera47dddd2013-09-03 17:38:47 +02001636 cpu_abort(cpu, "TB too big during recompile");
Blue Swirl5b6dd862012-12-02 16:04:43 +00001637 }
1638
1639 cflags = n | CF_LAST_IO;
1640 pc = tb->pc;
1641 cs_base = tb->cs_base;
1642 flags = tb->flags;
1643 tb_phys_invalidate(tb, -1);
Sergey Fedorov02d57ea2015-06-30 12:35:09 +03001644 if (tb->cflags & CF_NOCACHE) {
1645 if (tb->orig_tb) {
1646 /* Invalidate original TB if this TB was generated in
1647 * cpu_exec_nocache() */
1648 tb_phys_invalidate(tb->orig_tb, -1);
1649 }
1650 tb_free(tb);
1651 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001652 /* FIXME: In theory this could raise an exception. In practice
1653 we have already translated the block once so it's probably ok. */
Andreas Färber648f0342013-09-01 17:43:17 +02001654 tb_gen_code(cpu, pc, cs_base, flags, cflags);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001655 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1656 the first in the TB) then we end up generating a whole new TB and
1657 repeating the fault, which is horribly inefficient.
1658 Better would be to execute just this insn uncached, or generate a
1659 second new TB. */
Peter Maydell6886b982016-05-17 15:18:04 +01001660 cpu_loop_exit_noexc(cpu);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001661}
1662
Andreas Färber611d4f92013-09-01 17:52:07 +02001663void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001664{
1665 unsigned int i;
1666
1667 /* Discard jump cache entries for any tb which might potentially
1668 overlap the flushed page. */
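    /* A TB covers at most two pages, so a TB whose PC hashes into the
     * preceding page may still extend into 'addr'; hence both groups of
     * hash buckets are cleared.
     */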
1669 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
Andreas Färber8cd70432013-08-26 06:03:38 +02001670 memset(&cpu->tb_jmp_cache[i], 0,
Blue Swirl5b6dd862012-12-02 16:04:43 +00001671 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1672
1673 i = tb_jmp_cache_hash_page(addr);
Andreas Färber8cd70432013-08-26 06:03:38 +02001674 memset(&cpu->tb_jmp_cache[i], 0,
Blue Swirl5b6dd862012-12-02 16:04:43 +00001675 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1676}
1677
Emilio G. Cota7266ae92016-07-22 12:36:30 -04001678static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1679 struct qht_stats hst)
1680{
1681 uint32_t hgram_opts;
1682 size_t hgram_bins;
1683 char *hgram;
1684
1685 if (!hst.head_buckets) {
1686 return;
1687 }
1688 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1689 hst.used_head_buckets, hst.head_buckets,
1690 (double)hst.used_head_buckets / hst.head_buckets * 100);
1691
1692 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1693 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1694 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1695 hgram_opts |= QDIST_PR_NODECIMAL;
1696 }
1697 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1698 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1699 qdist_avg(&hst.occupancy) * 100, hgram);
1700 g_free(hgram);
1701
1702 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1703 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1704 if (hgram_bins > 10) {
1705 hgram_bins = 10;
1706 } else {
1707 hgram_bins = 0;
1708 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1709 }
1710 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1711 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1712 qdist_avg(&hst.chain), hgram);
1713 g_free(hgram);
1714}
1715
Blue Swirl5b6dd862012-12-02 16:04:43 +00001716void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1717{
1718 int i, target_code_size, max_target_code_size;
1719 int direct_jmp_count, direct_jmp2_count, cross_page;
1720 TranslationBlock *tb;
Emilio G. Cota329844d2016-06-08 14:55:33 -04001721 struct qht_stats hst;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001722
1723 target_code_size = 0;
1724 max_target_code_size = 0;
1725 cross_page = 0;
1726 direct_jmp_count = 0;
1727 direct_jmp2_count = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001728 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1729 tb = &tcg_ctx.tb_ctx.tbs[i];
Blue Swirl5b6dd862012-12-02 16:04:43 +00001730 target_code_size += tb->size;
1731 if (tb->size > max_target_code_size) {
1732 max_target_code_size = tb->size;
1733 }
1734 if (tb->page_addr[1] != -1) {
1735 cross_page++;
1736 }
Sergey Fedorovf3091012016-04-10 23:35:45 +03001737 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001738 direct_jmp_count++;
Sergey Fedorovf3091012016-04-10 23:35:45 +03001739 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001740 direct_jmp2_count++;
1741 }
1742 }
1743 }
1744 /* XXX: avoid using doubles ? */
1745 cpu_fprintf(f, "Translation buffer state:\n");
1746 cpu_fprintf(f, "gen code size %td/%zd\n",
Evgeny Voevodin0b0d3322013-02-01 01:47:22 +07001747 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
Richard Hendersonb125f9d2015-09-22 13:01:15 -07001748 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001749 cpu_fprintf(f, "TB count %d/%d\n",
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001750 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001751 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001752 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1753 tcg_ctx.tb_ctx.nb_tbs : 0,
1754 max_target_code_size);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001755 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001756 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1757 tcg_ctx.code_gen_buffer) /
1758 tcg_ctx.tb_ctx.nb_tbs : 0,
1759 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1760 tcg_ctx.code_gen_buffer) /
1761 target_code_size : 0);
1762 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1763 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1764 tcg_ctx.tb_ctx.nb_tbs : 0);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001765 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1766 direct_jmp_count,
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001767 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1768 tcg_ctx.tb_ctx.nb_tbs : 0,
Blue Swirl5b6dd862012-12-02 16:04:43 +00001769 direct_jmp2_count,
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001770 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1771 tcg_ctx.tb_ctx.nb_tbs : 0);
Emilio G. Cota329844d2016-06-08 14:55:33 -04001772
1773 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
Emilio G. Cota7266ae92016-07-22 12:36:30 -04001774 print_qht_statistics(f, cpu_fprintf, hst);
Emilio G. Cota329844d2016-06-08 14:55:33 -04001775 qht_statistics_destroy(&hst);
1776
Blue Swirl5b6dd862012-12-02 16:04:43 +00001777 cpu_fprintf(f, "\nStatistics:\n");
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001778 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1779 cpu_fprintf(f, "TB invalidate count %d\n",
1780 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001781 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1782 tcg_dump_info(f, cpu_fprintf);
1783}
1784
Max Filippov246ae242014-11-02 11:04:18 +03001785void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1786{
1787 tcg_dump_op_count(f, cpu_fprintf);
1788}
1789
Blue Swirl5b6dd862012-12-02 16:04:43 +00001790#else /* CONFIG_USER_ONLY */
1791
Andreas Färberc3affe52013-01-18 15:03:43 +01001792void cpu_interrupt(CPUState *cpu, int mask)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001793{
Andreas Färber259186a2013-01-17 18:51:17 +01001794 cpu->interrupt_request |= mask;
Peter Maydell378df4b2013-02-22 18:10:03 +00001795 cpu->tcg_exit_req = 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001796}
1797
1798/*
1799 * Walks guest process memory "regions" one by one
1800 * and calls callback function 'fn' for each region.
1801 */
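/* A "region" is a maximal run of consecutive guest pages that share the same
 * protection bits.  walk_memory_regions_1() recurses through the multi-level
 * l1_map table and walk_memory_regions_end() flushes the pending region to
 * 'fn' whenever the protection changes or the walk finishes.
 */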
1802struct walk_memory_regions_data {
1803 walk_memory_regions_fn fn;
1804 void *priv;
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001805 target_ulong start;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001806 int prot;
1807};
1808
1809static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001810 target_ulong end, int new_prot)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001811{
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001812 if (data->start != -1u) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001813 int rc = data->fn(data->priv, data->start, end, data->prot);
1814 if (rc != 0) {
1815 return rc;
1816 }
1817 }
1818
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001819 data->start = (new_prot ? end : -1u);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001820 data->prot = new_prot;
1821
1822 return 0;
1823}
1824
1825static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001826 target_ulong base, int level, void **lp)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001827{
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001828 target_ulong pa;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001829 int i, rc;
1830
1831 if (*lp == NULL) {
1832 return walk_memory_regions_end(data, base, 0);
1833 }
1834
1835 if (level == 0) {
1836 PageDesc *pd = *lp;
1837
Paolo Bonzini03f49952013-11-07 17:14:36 +01001838 for (i = 0; i < V_L2_SIZE; ++i) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001839 int prot = pd[i].flags;
1840
1841 pa = base | (i << TARGET_PAGE_BITS);
1842 if (prot != data->prot) {
1843 rc = walk_memory_regions_end(data, pa, prot);
1844 if (rc != 0) {
1845 return rc;
1846 }
1847 }
1848 }
1849 } else {
1850 void **pp = *lp;
1851
Paolo Bonzini03f49952013-11-07 17:14:36 +01001852 for (i = 0; i < V_L2_SIZE; ++i) {
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001853 pa = base | ((target_ulong)i <<
Paolo Bonzini03f49952013-11-07 17:14:36 +01001854 (TARGET_PAGE_BITS + V_L2_BITS * level));
Blue Swirl5b6dd862012-12-02 16:04:43 +00001855 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1856 if (rc != 0) {
1857 return rc;
1858 }
1859 }
1860 }
1861
1862 return 0;
1863}
1864
1865int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1866{
1867 struct walk_memory_regions_data data;
1868 uintptr_t i;
1869
1870 data.fn = fn;
1871 data.priv = priv;
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001872 data.start = -1u;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001873 data.prot = 0;
1874
1875 for (i = 0; i < V_L1_SIZE; i++) {
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001876 int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
Paolo Bonzini03f49952013-11-07 17:14:36 +01001877 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001878 if (rc != 0) {
1879 return rc;
1880 }
1881 }
1882
1883 return walk_memory_regions_end(&data, 0, 0);
1884}
1885
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001886static int dump_region(void *priv, target_ulong start,
1887 target_ulong end, unsigned long prot)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001888{
1889 FILE *f = (FILE *)priv;
1890
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001891 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1892 " "TARGET_FMT_lx" %c%c%c\n",
Blue Swirl5b6dd862012-12-02 16:04:43 +00001893 start, end, end - start,
1894 ((prot & PAGE_READ) ? 'r' : '-'),
1895 ((prot & PAGE_WRITE) ? 'w' : '-'),
1896 ((prot & PAGE_EXEC) ? 'x' : '-'));
1897
1898 return 0;
1899}
1900
1901/* dump memory mappings */
1902void page_dump(FILE *f)
1903{
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001904 const int length = sizeof(target_ulong) * 2;
Stefan Weil227b8172013-09-12 20:09:06 +02001905 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1906 length, "start", length, "end", length, "size", "prot");
Blue Swirl5b6dd862012-12-02 16:04:43 +00001907 walk_memory_regions(f, dump_region);
1908}
1909
1910int page_get_flags(target_ulong address)
1911{
1912 PageDesc *p;
1913
1914 p = page_find(address >> TARGET_PAGE_BITS);
1915 if (!p) {
1916 return 0;
1917 }
1918 return p->flags;
1919}
1920
1921/* Modify the flags of a page and invalidate the code if necessary.
1922   The flag PAGE_WRITE_ORG is set automatically depending
1923 on PAGE_WRITE. The mmap_lock should already be held. */
1924void page_set_flags(target_ulong start, target_ulong end, int flags)
1925{
1926 target_ulong addr, len;
1927
1928 /* This function should never be called with addresses outside the
1929 guest address space. If this assert fires, it probably indicates
1930 a missing call to h2g_valid. */
1931#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001932 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Blue Swirl5b6dd862012-12-02 16:04:43 +00001933#endif
1934 assert(start < end);
1935
1936 start = start & TARGET_PAGE_MASK;
1937 end = TARGET_PAGE_ALIGN(end);
1938
1939 if (flags & PAGE_WRITE) {
1940 flags |= PAGE_WRITE_ORG;
1941 }
1942
1943 for (addr = start, len = end - start;
1944 len != 0;
1945 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1946 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1947
1948 /* If the write protection bit is set, then we invalidate
1949 the code inside. */
1950 if (!(p->flags & PAGE_WRITE) &&
1951 (flags & PAGE_WRITE) &&
1952 p->first_tb) {
Peter Maydell75809222016-05-17 15:18:02 +01001953 tb_invalidate_phys_page(addr, 0);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001954 }
1955 p->flags = flags;
1956 }
1957}
1958
1959int page_check_range(target_ulong start, target_ulong len, int flags)
1960{
1961 PageDesc *p;
1962 target_ulong end;
1963 target_ulong addr;
1964
1965 /* This function should never be called with addresses outside the
1966 guest address space. If this assert fires, it probably indicates
1967 a missing call to h2g_valid. */
1968#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001969 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Blue Swirl5b6dd862012-12-02 16:04:43 +00001970#endif
1971
1972 if (len == 0) {
1973 return 0;
1974 }
1975 if (start + len - 1 < start) {
1976 /* We've wrapped around. */
1977 return -1;
1978 }
1979
1980    /* must do this before we lose bits in the next step */
1981 end = TARGET_PAGE_ALIGN(start + len);
1982 start = start & TARGET_PAGE_MASK;
1983
1984 for (addr = start, len = end - start;
1985 len != 0;
1986 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1987 p = page_find(addr >> TARGET_PAGE_BITS);
1988 if (!p) {
1989 return -1;
1990 }
1991 if (!(p->flags & PAGE_VALID)) {
1992 return -1;
1993 }
1994
1995 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1996 return -1;
1997 }
1998 if (flags & PAGE_WRITE) {
1999 if (!(p->flags & PAGE_WRITE_ORG)) {
2000 return -1;
2001 }
2002 /* unprotect the page if it was put read-only because it
2003 contains translated code */
2004 if (!(p->flags & PAGE_WRITE)) {
Peter Maydellf213e722016-05-17 15:18:03 +01002005 if (!page_unprotect(addr, 0)) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00002006 return -1;
2007 }
2008 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00002009 }
2010 }
2011 return 0;
2012}
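/* Illustrative sketch (not part of the build): a user-mode caller validating a
 * guest buffer with page_check_range() before touching it directly.  The
 * helper name is hypothetical.
 */
#if 0
static bool example_guest_range_writable(target_ulong guest_addr, target_ulong size)
{
    /* A return value of 0 means every page in the range is valid and grants
     * the requested access; asking for PAGE_WRITE also unprotects pages that
     * were made read-only to guard translated code. */
    return page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) == 0;
}
#endif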
2013
2014/* called from signal handler: invalidate the code and unprotect the
Peter Maydellf213e722016-05-17 15:18:03 +01002015 * page. Return 0 if the fault was not handled, 1 if it was handled,
2016 * and 2 if it was handled but the caller must cause the TB to be
2017 * immediately exited. (We can only return 2 if the 'pc' argument is
2018 * non-zero.)
2019 */
2020int page_unprotect(target_ulong address, uintptr_t pc)
Blue Swirl5b6dd862012-12-02 16:04:43 +00002021{
2022 unsigned int prot;
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002023 bool current_tb_invalidated;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002024 PageDesc *p;
2025 target_ulong host_start, host_end, addr;
2026
2027 /* Technically this isn't safe inside a signal handler. However we
2028 know this only ever happens in a synchronous SEGV handler, so in
2029 practice it seems to be ok. */
2030 mmap_lock();
2031
2032 p = page_find(address >> TARGET_PAGE_BITS);
2033 if (!p) {
2034 mmap_unlock();
2035 return 0;
2036 }
2037
2038 /* if the page was really writable, then we change its
2039 protection back to writable */
2040 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2041 host_start = address & qemu_host_page_mask;
2042 host_end = host_start + qemu_host_page_size;
2043
2044 prot = 0;
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002045 current_tb_invalidated = false;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002046 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2047 p = page_find(addr >> TARGET_PAGE_BITS);
2048 p->flags |= PAGE_WRITE;
2049 prot |= p->flags;
2050
2051 /* and since the content will be modified, we must invalidate
2052 the corresponding translated code. */
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002053 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00002054#ifdef DEBUG_TB_CHECK
2055 tb_invalidate_check(addr);
2056#endif
2057 }
2058 mprotect((void *)g2h(host_start), qemu_host_page_size,
2059 prot & PAGE_BITS);
2060
2061 mmap_unlock();
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002062        /* If the current TB was invalidated, return to the main loop */
2063 return current_tb_invalidated ? 2 : 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002064 }
2065 mmap_unlock();
2066 return 0;
2067}
2068#endif /* CONFIG_USER_ONLY */