/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow translation results to be inspected - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
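
/*
 * Illustrative sketch only (not part of the original header): the format
 * macro lets code log a tb_page_addr_t portably across both configurations,
 * e.g.:
 *
 *     tb_page_addr_t page = get_page_addr_code(env, pc);
 *     qemu_log("code page at " TB_PAGE_ADDR_FMT "\n", page);
 */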

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
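
/*
 * Hedged usage sketch (illustrative, not a canonical call site): a
 * softmmu fault path might restore guest state and bail out of the TB
 * roughly like this, where "retaddr" was captured with GETPC():
 *
 *     cpu_restore_state(cpu, retaddr, true);  // sync PC/icount if possible
 *     cpu_loop_exit(cpu);                     // unwind back to the exec loop
 *
 * cpu_loop_exit_restore() below combines both steps.
 */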

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes are only guaranteed to
 * be complete once the source vCPU's safe work has run. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes are only guaranteed to
 * be complete once the source vCPU's safe work has run. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
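
/*
 * Sketch of @idxmap usage (MMU_KERNEL_IDX and MMU_USER_IDX are hypothetical
 * target-defined indexes): to flush one page from just those two TLBs:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */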
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * are only guaranteed to be complete once the source vCPU's safe work
 * has run. This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * are only guaranteed to be complete once the source vCPU's safe work
 * has run. This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
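
/*
 * Hedged sketch of a typical caller (illustrative, not a real target):
 * after a successful page table walk in the tlb_fill() path, target code
 * might install the translation along these lines:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */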
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
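
/*
 * For instance (illustrative): given the note above, the search data of a
 * TB can be located as (void *)((char *)tb->tc.ptr + tb->tc.size).
 */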

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
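
/*
 * Illustrative decode of the tagged pointers described above (the real
 * accessors live elsewhere; this only shows the encoding):
 *
 *     TranslationBlock *dest = (TranslationBlock *)(tb->jmp_dest[n] & ~(uintptr_t)1);
 *     bool invalidating = tb->jmp_dest[n] & 1;   // LSB is the tag bit
 */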

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
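
/*
 * Hedged sketch of the lookup/generate pattern (simplified from the
 * execution loop; error handling and jmp-cache details omitted):
 *
 *     uint32_t cf_mask = curr_cflags();
 *     TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags,
 *                                             cf_mask);
 *     if (tb == NULL) {
 *         tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
 *     }
 */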

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
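
/*
 * Hedged sketch: a helper that can fault typically captures GETPC() once
 * on entry and threads it down so the unwinder can locate the guest insn
 * (helper_example_store is hypothetical):
 *
 *     void helper_example_store(CPUArchState *env, target_ulong addr,
 *                               uint32_t val)
 *     {
 *         uintptr_t ra = GETPC();
 *         cpu_stl_data_ra(env, addr, val, ra);  // may fault; ra guides unwind
 *     }
 */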

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
#endif

/* vl.c */
extern int singlestep;

#endif