/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of writes to a given page to decide when to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
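
/*
 * Illustrative decomposition (example numbers, not build constants):
 * with L1_MAP_ADDR_SPACE_BITS = 36, TARGET_PAGE_BITS = 12 and
 * L2_BITS = 10, there are 24 index bits to cover, so
 * V_L1_BITS_REM = 24 % 10 = 4, V_L1_BITS = 4 (a 16-entry L1 table),
 * V_L1_SHIFT = 20, and two 10-bit L2 levels sit below the L1 table.
 */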

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
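
/*
 * Encoding note: a PhysPageEntry packs into 16 bits. When is_leaf is
 * set, ptr indexes phys_sections; otherwise it indexes phys_map_nodes,
 * with PHYS_MAP_NODE_NIL (0x7fff, defined below) marking an
 * unallocated subtree.
 */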

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections. */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
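
/*
 * A usage sketch (addr is a hypothetical guest address): the page
 * descriptor for the page containing addr is
 *
 *     PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);
 *
 * which walks the same levels as page_find_alloc() but, with alloc == 0,
 * returns NULL as soon as an intermediate table is missing.
 */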

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
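
/*
 * Each level consumes L2_BITS of the index. When the remaining range is
 * aligned and large enough ((*index & (step - 1)) == 0 && *nb >= step),
 * a leaf written at level > 0 acts like a "large page": one entry maps
 * 1 << (level * L2_BITS) pages to the same section, so *index and *nb
 * advance by 'step' instead of recursing further down.
 */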

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
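
/*
 * Note the fallthrough above: when the walk hits PHYS_MAP_NODE_NIL,
 * phys_page_find() still returns a valid pointer,
 * &phys_sections[phys_section_unassigned], so callers never need to
 * NULL-check the result.
 */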

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches. */
        /* We have a +- 4GB range on the branches; leave some slop. */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
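
/*
 * Rationale (not stated in the original comment): code_gen_buffer_max_size
 * leaves headroom for one worst-case translation
 * (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes), so tb_alloc() can check the fill
 * level up front instead of coping with running out of space in the middle
 * of generating a block.
 */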

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
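
/*
 * The low two bits of these list pointers are a tag, not part of the
 * address: in page_next[] they record which of the TB's (up to) two
 * pages the link belongs to, and the value 2 in the jmp_first chain
 * marks the list head. Masking with ~3 recovers the real
 * TranslationBlock pointer.
 */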

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
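
/*
 * Worked example: set_bits(tab, 3, 7) marks bits 3..9. The first byte
 * is OR-ed with 0xf8 (bits 3-7); the remaining two bits land in bits
 * 0-1 of the next byte via the final partial-byte mask in the
 * cross-byte branch above.
 */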

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
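
/*
 * The bitmap has one bit per byte of the page, set wherever translated
 * code lives. tb_invalidate_phys_page_fast() consults it so that stores
 * into the non-code bytes of a page that contains some code do not
 * force a TB invalidation.
 */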
1038
Andreas Färber9349b4f2012-03-14 01:38:32 +01001039TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001040 target_ulong pc, target_ulong cs_base,
1041 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001042{
1043 TranslationBlock *tb;
1044 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001045 tb_page_addr_t phys_pc, phys_page2;
1046 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001047 int code_gen_size;
1048
Paul Brook41c1b1c2010-03-12 16:54:58 +00001049 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001050 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001051 if (!tb) {
1052 /* flush must be done */
1053 tb_flush(env);
1054 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001055 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001056 /* Don't forget to invalidate previous TB info. */
1057 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001058 }
1059 tc_ptr = code_gen_ptr;
1060 tb->tc_ptr = tc_ptr;
1061 tb->cs_base = cs_base;
1062 tb->flags = flags;
1063 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001064 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001065 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1066 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001067
bellardd720b932004-04-25 17:57:43 +00001068 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001069 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001070 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001071 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001072 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001073 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001074 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001075 return tb;
bellardd720b932004-04-25 17:57:43 +00001076}
ths3b46e622007-09-17 08:09:54 +00001077
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001078/*
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001079 * Invalidate all TBs which intersect with the target physical address range
1080 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1081 * 'is_cpu_write_access' should be true if called from a real cpu write
1082 * access: the virtual CPU will exit the current TB if code is modified inside
1083 * this TB.
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001084 */
1085void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1086 int is_cpu_write_access)
1087{
1088 while (start < end) {
1089 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1090 start &= TARGET_PAGE_MASK;
1091 start += TARGET_PAGE_SIZE;
1092 }
1093}
1094
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001095/*
1096 * Invalidate all TBs which intersect with the target physical address range
1097 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1098 * 'is_cpu_write_access' should be true if called from a real cpu write
1099 * access: the virtual CPU will exit the current TB if code is modified inside
1100 * this TB.
1101 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001102void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001103 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001104{
aliguori6b917542008-11-18 19:46:41 +00001105 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001106 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001107 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001108 PageDesc *p;
1109 int n;
1110#ifdef TARGET_HAS_PRECISE_SMC
1111 int current_tb_not_found = is_cpu_write_access;
1112 TranslationBlock *current_tb = NULL;
1113 int current_tb_modified = 0;
1114 target_ulong current_pc = 0;
1115 target_ulong current_cs_base = 0;
1116 int current_flags = 0;
1117#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001118
1119 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001120 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001121 return;
ths5fafdf22007-09-16 21:08:06 +00001122 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001123 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1124 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001125 /* build code bitmap */
1126 build_page_bitmap(p);
1127 }
1128
1129 /* we remove all the TBs in the range [start, end[ */
1130 /* XXX: see if in some cases it could be faster to invalidate all the code */
1131 tb = p->first_tb;
1132 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001133 n = (uintptr_t)tb & 3;
1134 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001135 tb_next = tb->page_next[n];
1136 /* NOTE: this is subtle as a TB may span two physical pages */
1137 if (n == 0) {
1138 /* NOTE: tb_end may be after the end of the page, but
1139 it is not a problem */
1140 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1141 tb_end = tb_start + tb->size;
1142 } else {
1143 tb_start = tb->page_addr[1];
1144 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1145 }
1146 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001147#ifdef TARGET_HAS_PRECISE_SMC
1148 if (current_tb_not_found) {
1149 current_tb_not_found = 0;
1150 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001151 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001152 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001153 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001154 }
1155 }
1156 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001157 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001158 /* If we are modifying the current TB, we must stop
1159 its execution. We could be more precise by checking
1160 that the modification is after the current PC, but it
1161 would require a specialized function to partially
1162 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001163
bellardd720b932004-04-25 17:57:43 +00001164 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001165 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001166 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1167 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001168 }
1169#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001170 /* we need to do that to handle the case where a signal
1171 occurs while doing tb_phys_invalidate() */
1172 saved_tb = NULL;
1173 if (env) {
1174 saved_tb = env->current_tb;
1175 env->current_tb = NULL;
1176 }
bellard9fa3e852004-01-04 18:06:42 +00001177 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001178 if (env) {
1179 env->current_tb = saved_tb;
1180 if (env->interrupt_request && env->current_tb)
1181 cpu_interrupt(env, env->interrupt_request);
1182 }
bellard9fa3e852004-01-04 18:06:42 +00001183 }
1184 tb = tb_next;
1185 }
1186#if !defined(CONFIG_USER_ONLY)
1187 /* if no code remaining, no need to continue to use slow writes */
1188 if (!p->first_tb) {
1189 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001190 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001191 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001192 }
1193 }
1194#endif
1195#ifdef TARGET_HAS_PRECISE_SMC
1196 if (current_tb_modified) {
1197 /* we generate a block containing just the instruction
1198 modifying the memory. It will ensure that it cannot modify
1199 itself */
bellardea1c1802004-06-14 18:56:36 +00001200 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001201 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001202 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001203 }
1204#endif
1205}
1206
1207/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001208static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001209{
1210 PageDesc *p;
1211 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001212#if 0
bellarda4193c82004-06-03 14:01:43 +00001213 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001214 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1215 cpu_single_env->mem_io_vaddr, len,
1216 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001217 cpu_single_env->eip +
1218 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001219 }
1220#endif
bellard9fa3e852004-01-04 18:06:42 +00001221 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001222 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001223 return;
1224 if (p->code_bitmap) {
1225 offset = start & ~TARGET_PAGE_MASK;
1226 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1227 if (b & ((1 << len) - 1))
1228 goto do_invalidate;
1229 } else {
1230 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001231 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001232 }
1233}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
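
/* Sketch of the user-mode self-modifying-code round trip implied above
   (an explanatory flow, not code from this file): once the guest page is
   mprotect()ed read-only, a guest store into it raises SIGSEGV on the
   host; the signal handler calls page_unprotect(), which restores
   PAGE_WRITE, drops the stale translations via tb_invalidate_phys_page(),
   and the faulting store is then restarted against a writable page. */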

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
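
/* Note on the tagged pointers used here and in tb_alloc_page (an
   explanatory aside, not normative): TranslationBlock pointers have at
   least 4-byte alignment, so the two low bits are free.  The page_next
   lists store the page slot (0 or 1) in those bits, which is why list
   walkers recover it with "n = (uintptr_t)tb & 3" and strip it with
   "& ~3"; the value 2 stored in jmp_first marks the jump-list head. */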

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
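
/* Usage sketch (hypothetical caller, assuming host_pc was taken from a
   signal context):

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb) {
           cpu_restore_state(tb, env, host_pc);
       }

   This relies on tbs[] being filled in generation order, so tc_ptr values
   are sorted and the search can return the TB whose code region contains
   host_pc even when host_pc is not the block's first byte. */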

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
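
/* Usage sketch (hypothetical caller such as a gdbstub, assuming the
   BP_MEM_WRITE flag defined alongside BP_GDB): set a 4-byte write
   watchpoint and keep the handle for later removal by reference:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE,
                                 &wp) < 0) {
           // reject the request: len was not a power of two, addr was
           // unaligned, or len exceeded TARGET_PAGE_SIZE
       }
 */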

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
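
/* Usage sketch (hypothetical debugger frontend): insert a breakpoint at a
   guest PC, then drop every GDB-injected one in a single call:

       cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
       ...
       cpu_breakpoint_remove_all(env, BP_GDB);

   Passing NULL for the handle is fine when the caller will not need to
   remove that specific breakpoint by reference later. */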

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
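
/* Note (explanatory): under TCG the singlestep flag is consulted at
   translation time, so the full tb_flush() is the simple way to force
   retranslation.  A debugger stepping one instruction would typically
   call cpu_single_step(env, 1), resume the CPU, and receive EXCP_DEBUG
   from the CPU loop after that instruction retires. */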

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
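
/* Usage sketch (hypothetical caller, e.g. handling "-d in_asm,exec"):

       int mask = cpu_str_to_log_mask(optarg);
       if (!mask) {
           // walk cpu_log_items[] here to print the valid item names
           exit(1);
       }
       cpu_set_log(mask);

   A zero return means some entry in the comma-separated list did not
   match any name in cpu_log_items[]. */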

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
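
/* Usage sketch (hypothetical device model): a VGA adapter that has just
   redrawn the window [start, start + size) clears only its own dirty
   bits, leaving migration's view of the same pages untouched:

       cpu_physical_memory_reset_dirty(start, start + size, VGA_DIRTY_FLAG);

   assuming the VGA_DIRTY_FLAG constant from the dirty-bit definitions
   this file relies on. */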

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
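
/* Shape of the returned value (an explanatory summary of the code above,
   not a new contract): for RAM the iotlb carries the page's ram address
   with a special section index (phys_section_notdirty or phys_section_rom)
   folded into the low bits, so stores still trap for dirty tracking and
   ROM protection; otherwise it carries the section's index in
   phys_sections[] plus the in-page offset, which the MMIO slow path uses
   to reach the device's read/write callbacks. */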

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
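
/* Usage sketch (hypothetical callback in the style of dump_region below):
   total up the bytes mapped executable in the guest:

       static int count_exec(void *priv, abi_ulong start,
                             abi_ulong end, unsigned long prot)
       {
           if (prot & PAGE_EXEC) {
               *(abi_ulong *)priv += end - start;
           }
           return 0;   // a non-zero return would abort the walk early
       }

       abi_ulong total = 0;
       walk_memory_regions(&total, count_exec);
 */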

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
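
/* Usage sketch (hypothetical linux-user syscall helper): validate a guest
   buffer before touching it:

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
           return -TARGET_EFAULT;
       }

   TARGET_EFAULT being the usual linux-user errno constant; any negative
   return means some page in the range is unmapped or lacks the requested
   access. */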

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
2477
Alex Williamson04b16652010-07-02 11:13:17 -06002478static void *file_ram_alloc(RAMBlock *block,
2479 ram_addr_t memory,
2480 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002481{
2482 char *filename;
2483 void *area;
2484 int fd;
2485#ifdef MAP_POPULATE
2486 int flags;
2487#endif
2488 unsigned long hpagesize;
2489
2490 hpagesize = gethugepagesize(path);
2491 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002492 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002493 }
2494
2495 if (memory < hpagesize) {
2496 return NULL;
2497 }
2498
2499 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2500 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2501 return NULL;
2502 }
2503
2504 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002505 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002506 }
2507
2508 fd = mkstemp(filename);
2509 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002510 perror("unable to create backing store for hugepages");
2511 free(filename);
2512 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002513 }
2514 unlink(filename);
2515 free(filename);
2516
2517 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2518
2519 /*
 2520 * ftruncate is not supported by hugetlbfs on older
 2521 * hosts, so don't bother bailing out on errors.
2522 * If anything goes wrong with it under other filesystems,
2523 * mmap will fail.
2524 */
2525 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002526 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002527
2528#ifdef MAP_POPULATE
 2529 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages
 2530 * when MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
 2531 * to sidestep this quirk.
2532 */
2533 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2534 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2535#else
2536 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2537#endif
2538 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002539 perror("file_ram_alloc: can't mmap RAM pages");
2540 close(fd);
2541 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002542 }
Alex Williamson04b16652010-07-02 11:13:17 -06002543 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002544 return area;
2545}
2546#endif
2547
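/*
 * The huge-page size rounding in file_ram_alloc() above, worked through
 * with illustrative numbers: for 2 MiB huge pages (hpagesize ==
 * 0x200000), a 5 MiB request is rounded up to three huge pages:
 *
 *     (0x500000 + 0x1fffff) & ~0x1fffff == 0x600000
 */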
Alex Williamsond17b5282010-06-25 11:08:38 -06002548static ram_addr_t find_ram_offset(ram_addr_t size)
2549{
Alex Williamson04b16652010-07-02 11:13:17 -06002550 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002551 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002552
2553 if (QLIST_EMPTY(&ram_list.blocks))
2554 return 0;
2555
2556 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002557 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002558
2559 end = block->offset + block->length;
2560
2561 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2562 if (next_block->offset >= end) {
2563 next = MIN(next, next_block->offset);
2564 }
2565 }
2566 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002567 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002568 mingap = next - end;
2569 }
2570 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002571
2572 if (offset == RAM_ADDR_MAX) {
2573 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2574 (uint64_t)size);
2575 abort();
2576 }
2577
Alex Williamson04b16652010-07-02 11:13:17 -06002578 return offset;
2579}
2580
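/*
 * Worked sketch of the gap search above (hypothetical layout): with
 * RAMBlocks at [0, 0x100000) and [0x300000, 0x400000),
 * find_ram_offset(0x100000) considers the 2 MiB gap after the first
 * block and the unbounded space after the second, and returns 0x100000:
 * the smallest gap that still fits, keeping ram_addr_t space compact.
 */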
2581static ram_addr_t last_ram_offset(void)
2582{
Alex Williamsond17b5282010-06-25 11:08:38 -06002583 RAMBlock *block;
2584 ram_addr_t last = 0;
2585
2586 QLIST_FOREACH(block, &ram_list.blocks, next)
2587 last = MAX(last, block->offset + block->length);
2588
2589 return last;
2590}
2591
Avi Kivityc5705a72011-12-20 15:59:12 +02002592void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002593{
2594 RAMBlock *new_block, *block;
2595
Avi Kivityc5705a72011-12-20 15:59:12 +02002596 new_block = NULL;
2597 QLIST_FOREACH(block, &ram_list.blocks, next) {
2598 if (block->offset == addr) {
2599 new_block = block;
2600 break;
2601 }
2602 }
2603 assert(new_block);
2604 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002605
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002606 if (dev) {
2607 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002608 if (id) {
2609 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002610 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002611 }
2612 }
2613 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2614
2615 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002616 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002617 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2618 new_block->idstr);
2619 abort();
2620 }
2621 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002622}
2623
2624ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2625 MemoryRegion *mr)
2626{
2627 RAMBlock *new_block;
2628
2629 size = TARGET_PAGE_ALIGN(size);
2630 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002631
Avi Kivity7c637362011-12-21 13:09:49 +02002632 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002633 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002634 if (host) {
2635 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002636 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002637 } else {
2638 if (mem_path) {
 2639#if defined(__linux__) && !defined(TARGET_S390X)
2640 new_block->host = file_ram_alloc(new_block, size, mem_path);
2641 if (!new_block->host) {
2642 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002643 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002644 }
2645#else
2646 fprintf(stderr, "-mem-path option unsupported\n");
2647 exit(1);
2648#endif
2649 } else {
2650#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002651 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2652 a system-defined value, which is at least 256GB. Larger systems
 2653 have larger values. We put the guest between the end of the data
 2654 segment (system break) and this value. We use 32GB as a base to
2655 have enough room for the system break to grow. */
2656 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002657 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002658 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002659 if (new_block->host == MAP_FAILED) {
2660 fprintf(stderr, "Allocating RAM failed\n");
2661 abort();
2662 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002663#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002664 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002665 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002666 } else {
2667 new_block->host = qemu_vmalloc(size);
2668 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002669#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002670 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002671 }
2672 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002673 new_block->length = size;
2674
2675 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2676
Anthony Liguori7267c092011-08-20 22:09:37 -05002677 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002678 last_ram_offset() >> TARGET_PAGE_BITS);
2679 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2680 0xff, size >> TARGET_PAGE_BITS);
2681
2682 if (kvm_enabled())
2683 kvm_setup_guest_memory(new_block->host, size);
2684
2685 return new_block->offset;
2686}
2687
Avi Kivityc5705a72011-12-20 15:59:12 +02002688ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002689{
Avi Kivityc5705a72011-12-20 15:59:12 +02002690 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002691}
bellarde9a1ab12007-02-08 23:08:38 +00002692
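/*
 * Usage sketch (hypothetical device code, not part of the build; 'mr'
 * is assumed to be the caller's already-initialized MemoryRegion):
 *
 *     ram_addr_t off = qemu_ram_alloc(0x200000, mr);
 *     qemu_ram_set_idstr(off, "mydev.vram", NULL);
 *     uint8_t *p = qemu_get_ram_ptr(off);
 *
 * In practice these calls are usually reached through the memory API
 * (e.g. memory_region_init_ram()) rather than made directly.
 */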
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002693void qemu_ram_free_from_ptr(ram_addr_t addr)
2694{
2695 RAMBlock *block;
2696
2697 QLIST_FOREACH(block, &ram_list.blocks, next) {
2698 if (addr == block->offset) {
2699 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002700 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002701 return;
2702 }
2703 }
2704}
2705
Anthony Liguoric227f092009-10-01 16:12:16 -05002706void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002707{
Alex Williamson04b16652010-07-02 11:13:17 -06002708 RAMBlock *block;
2709
2710 QLIST_FOREACH(block, &ram_list.blocks, next) {
2711 if (addr == block->offset) {
2712 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002713 if (block->flags & RAM_PREALLOC_MASK) {
2714 ;
2715 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002716#if defined(__linux__) && !defined(TARGET_S390X)
2717 if (block->fd) {
2718 munmap(block->host, block->length);
2719 close(block->fd);
2720 } else {
2721 qemu_vfree(block->host);
2722 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002723#else
2724 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002725#endif
2726 } else {
2727#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2728 munmap(block->host, block->length);
2729#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002730 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002731 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002732 } else {
2733 qemu_vfree(block->host);
2734 }
Alex Williamson04b16652010-07-02 11:13:17 -06002735#endif
2736 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002737 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002738 return;
2739 }
2740 }
2741
bellarde9a1ab12007-02-08 23:08:38 +00002742}
2743
Huang Yingcd19cfa2011-03-02 08:56:19 +01002744#ifndef _WIN32
2745void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2746{
2747 RAMBlock *block;
2748 ram_addr_t offset;
2749 int flags;
2750 void *area, *vaddr;
2751
2752 QLIST_FOREACH(block, &ram_list.blocks, next) {
2753 offset = addr - block->offset;
2754 if (offset < block->length) {
2755 vaddr = block->host + offset;
2756 if (block->flags & RAM_PREALLOC_MASK) {
2757 ;
2758 } else {
2759 flags = MAP_FIXED;
2760 munmap(vaddr, length);
2761 if (mem_path) {
2762#if defined(__linux__) && !defined(TARGET_S390X)
2763 if (block->fd) {
2764#ifdef MAP_POPULATE
2765 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2766 MAP_PRIVATE;
2767#else
2768 flags |= MAP_PRIVATE;
2769#endif
2770 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2771 flags, block->fd, offset);
2772 } else {
2773 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2774 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2775 flags, -1, 0);
2776 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002777#else
2778 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002779#endif
2780 } else {
2781#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2782 flags |= MAP_SHARED | MAP_ANONYMOUS;
2783 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2784 flags, -1, 0);
2785#else
2786 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2787 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2788 flags, -1, 0);
2789#endif
2790 }
2791 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002792 fprintf(stderr, "Could not remap addr: "
2793 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002794 length, addr);
2795 exit(1);
2796 }
2797 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2798 }
2799 return;
2800 }
2801 }
2802}
2803#endif /* !_WIN32 */
2804
pbrookdc828ca2009-04-09 22:21:07 +00002805/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002806 With the exception of the softmmu code in this file, this should
 2807 only be used for local memory (e.g. video ram) that the device owns
 2808 and knows it isn't going to access beyond the end of the block.
2809
2810 It should not be used for general purpose DMA.
2811 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2812 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002813void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002814{
pbrook94a6b542009-04-11 17:15:54 +00002815 RAMBlock *block;
2816
Alex Williamsonf471a172010-06-11 11:11:42 -06002817 QLIST_FOREACH(block, &ram_list.blocks, next) {
2818 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002819 /* Move this entry to the start of the list. */
2820 if (block != QLIST_FIRST(&ram_list.blocks)) {
2821 QLIST_REMOVE(block, next);
2822 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2823 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002824 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002825 /* We need to check if the requested address is in RAM
 2826 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002827 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002828 */
2829 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002830 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002831 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002832 block->host =
2833 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002834 }
2835 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002836 return block->host + (addr - block->offset);
2837 }
pbrook94a6b542009-04-11 17:15:54 +00002838 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002839
2840 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2841 abort();
2842
2843 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002844}
2845
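/*
 * Usage sketch (hypothetical caller; not part of the build):
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     memset(vram, 0, vram_size);     // device-local access only
 *
 * Note the most-recently-used reordering above: callers that must not
 * perturb the block list should use qemu_safe_ram_ptr() below instead.
 */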
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002846/* Return a host pointer to ram allocated with qemu_ram_alloc.
 2847 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
2848 */
2849void *qemu_safe_ram_ptr(ram_addr_t addr)
2850{
2851 RAMBlock *block;
2852
2853 QLIST_FOREACH(block, &ram_list.blocks, next) {
2854 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002855 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002856 /* We need to check if the requested address is in RAM
 2857 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002858 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002859 */
2860 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002861 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002862 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002863 block->host =
2864 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002865 }
2866 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002867 return block->host + (addr - block->offset);
2868 }
2869 }
2870
2871 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2872 abort();
2873
2874 return NULL;
2875}
2876
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002877/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2878 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002879void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002880{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002881 if (*size == 0) {
2882 return NULL;
2883 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002884 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002885 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002886 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002887 RAMBlock *block;
2888
2889 QLIST_FOREACH(block, &ram_list.blocks, next) {
2890 if (addr - block->offset < block->length) {
2891 if (addr - block->offset + *size > block->length)
2892 *size = block->length - addr + block->offset;
2893 return block->host + (addr - block->offset);
2894 }
2895 }
2896
2897 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2898 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002899 }
2900}
2901
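/*
 * Sketch of the in/out size contract (not part of the build):
 *
 *     ram_addr_t sz = 0x10000;
 *     void *p = qemu_ram_ptr_length(off, &sz);
 *     // p is valid for sz bytes; sz may have been clamped to the
 *     // remainder of the RAMBlock containing 'off'
 */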
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002902void qemu_put_ram_ptr(void *addr)
2903{
2904 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002905}
2906
Marcelo Tosattie8902612010-10-11 15:31:19 -03002907int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002908{
pbrook94a6b542009-04-11 17:15:54 +00002909 RAMBlock *block;
2910 uint8_t *host = ptr;
2911
Jan Kiszka868bb332011-06-21 22:59:09 +02002912 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002913 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002914 return 0;
2915 }
2916
Alex Williamsonf471a172010-06-11 11:11:42 -06002917 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002918 /* This case occurs when the block is not mapped. */
2919 if (block->host == NULL) {
2920 continue;
2921 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002922 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002923 *ram_addr = block->offset + (host - block->host);
2924 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002925 }
pbrook94a6b542009-04-11 17:15:54 +00002926 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002927
Marcelo Tosattie8902612010-10-11 15:31:19 -03002928 return -1;
2929}
Alex Williamsonf471a172010-06-11 11:11:42 -06002930
Marcelo Tosattie8902612010-10-11 15:31:19 -03002931/* Some of the softmmu routines need to translate from a host pointer
2932 (typically a TLB entry) back to a ram offset. */
2933ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2934{
2935 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002936
Marcelo Tosattie8902612010-10-11 15:31:19 -03002937 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2938 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2939 abort();
2940 }
2941 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002942}
2943
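/*
 * Round-trip sketch (not part of the build):
 *
 *     void *host = qemu_get_ram_ptr(off);
 *     assert(qemu_ram_addr_from_host_nofail(host) == off);
 *
 * The _nofail variant aborts on pointers outside any RAMBlock; code
 * handling arbitrary host pointers should call qemu_ram_addr_from_host()
 * and check its return value instead.
 */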
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002944static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2945 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002946{
pbrook67d3b952006-12-18 05:03:52 +00002947#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002948 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002949#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002950#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002951 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002952#endif
2953 return 0;
2954}
2955
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002956static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2957 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002958{
2959#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002960 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002961#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002962#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002963 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002964#endif
2965}
2966
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002967static const MemoryRegionOps unassigned_mem_ops = {
2968 .read = unassigned_mem_read,
2969 .write = unassigned_mem_write,
2970 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002971};
2972
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002973static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2974 unsigned size)
2975{
2976 abort();
2977}
2978
2979static void error_mem_write(void *opaque, target_phys_addr_t addr,
2980 uint64_t value, unsigned size)
2981{
2982 abort();
2983}
2984
2985static const MemoryRegionOps error_mem_ops = {
2986 .read = error_mem_read,
2987 .write = error_mem_write,
2988 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002989};
2990
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002991static const MemoryRegionOps rom_mem_ops = {
2992 .read = error_mem_read,
2993 .write = unassigned_mem_write,
2994 .endianness = DEVICE_NATIVE_ENDIAN,
2995};
2996
2997static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2998 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002999{
bellard3a7d9292005-08-21 09:26:42 +00003000 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003001 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003002 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3003#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003004 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003005 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003006#endif
3007 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003008 switch (size) {
3009 case 1:
3010 stb_p(qemu_get_ram_ptr(ram_addr), val);
3011 break;
3012 case 2:
3013 stw_p(qemu_get_ram_ptr(ram_addr), val);
3014 break;
3015 case 4:
3016 stl_p(qemu_get_ram_ptr(ram_addr), val);
3017 break;
3018 default:
3019 abort();
3020 }
bellardf23db162005-08-21 19:12:28 +00003021 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003022 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003023 /* we remove the notdirty callback only if the code has been
3024 flushed */
3025 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003026 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003027}
3028
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003029static const MemoryRegionOps notdirty_mem_ops = {
3030 .read = error_mem_read,
3031 .write = notdirty_mem_write,
3032 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003033};
3034
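/*
 * Sketch of the dirty-flag dance in notdirty_mem_write() above (the
 * flag value is an assumption about this file's dirty-bit scheme):
 * with CODE_DIRTY_FLAG == 0x02, a guest store to a page whose
 * code-dirty bit is clear first invalidates any TBs on the page, then
 * performs the store and sets dirty_flags |= 0xfd.  Only once the
 * flags read back as 0xff, i.e. no translated code remains, is the
 * slow notdirty path retired via tlb_set_dirty().
 */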
pbrook0f459d12008-06-09 00:20:13 +00003035/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003036static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003037{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003038 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003039 target_ulong pc, cs_base;
3040 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003041 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003042 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003043 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003044
aliguori06d55cc2008-11-18 20:24:06 +00003045 if (env->watchpoint_hit) {
3046 /* We re-entered the check after replacing the TB. Now raise
 3047 * the debug interrupt so that it will trigger after the
3048 * current instruction. */
3049 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3050 return;
3051 }
pbrook2e70f6e2008-06-29 01:03:05 +00003052 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003053 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003054 if ((vaddr == (wp->vaddr & len_mask) ||
3055 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003056 wp->flags |= BP_WATCHPOINT_HIT;
3057 if (!env->watchpoint_hit) {
3058 env->watchpoint_hit = wp;
3059 tb = tb_find_pc(env->mem_io_pc);
3060 if (!tb) {
3061 cpu_abort(env, "check_watchpoint: could not find TB for "
3062 "pc=%p", (void *)env->mem_io_pc);
3063 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003064 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003065 tb_phys_invalidate(tb, -1);
3066 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3067 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003068 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003069 } else {
3070 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3071 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003072 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003073 }
aliguori06d55cc2008-11-18 20:24:06 +00003074 }
aliguori6e140f22008-11-18 20:37:55 +00003075 } else {
3076 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003077 }
3078 }
3079}
3080
pbrook6658ffb2007-03-16 23:58:11 +00003081/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3082 so these check for a hit then pass through to the normal out-of-line
3083 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003084static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3085 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003086{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003087 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3088 switch (size) {
3089 case 1: return ldub_phys(addr);
3090 case 2: return lduw_phys(addr);
3091 case 4: return ldl_phys(addr);
3092 default: abort();
3093 }
pbrook6658ffb2007-03-16 23:58:11 +00003094}
3095
Avi Kivity1ec9b902012-01-02 12:47:48 +02003096static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3097 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003098{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003099 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3100 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003101 case 1:
3102 stb_phys(addr, val);
3103 break;
3104 case 2:
3105 stw_phys(addr, val);
3106 break;
3107 case 4:
3108 stl_phys(addr, val);
3109 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003110 default: abort();
3111 }
pbrook6658ffb2007-03-16 23:58:11 +00003112}
3113
Avi Kivity1ec9b902012-01-02 12:47:48 +02003114static const MemoryRegionOps watch_mem_ops = {
3115 .read = watch_mem_read,
3116 .write = watch_mem_write,
3117 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003118};
pbrook6658ffb2007-03-16 23:58:11 +00003119
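/*
 * Usage sketch (assumes the cpu_watchpoint_insert() helper defined
 * earlier in this file; not part of the build):
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
 *
 * The TLB entry for the watched page is redirected at io_mem_watch, so
 * each guest store traps into watch_mem_write(), which runs
 * check_watchpoint() before completing the access via stb/stw/stl_phys.
 */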
Avi Kivity70c68e42012-01-02 12:32:48 +02003120static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3121 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003122{
Avi Kivity70c68e42012-01-02 12:32:48 +02003123 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003124 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003125 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003126#if defined(DEBUG_SUBPAGE)
3127 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3128 mmio, len, addr, idx);
3129#endif
blueswir1db7b5422007-05-26 17:36:03 +00003130
Avi Kivity5312bd82012-02-12 18:32:55 +02003131 section = &phys_sections[mmio->sub_section[idx]];
3132 addr += mmio->base;
3133 addr -= section->offset_within_address_space;
3134 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003135 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003136}
3137
Avi Kivity70c68e42012-01-02 12:32:48 +02003138static void subpage_write(void *opaque, target_phys_addr_t addr,
3139 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003140{
Avi Kivity70c68e42012-01-02 12:32:48 +02003141 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003142 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003143 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003144#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003145 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3146 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003147 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003148#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003149
Avi Kivity5312bd82012-02-12 18:32:55 +02003150 section = &phys_sections[mmio->sub_section[idx]];
3151 addr += mmio->base;
3152 addr -= section->offset_within_address_space;
3153 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003154 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003155}
3156
Avi Kivity70c68e42012-01-02 12:32:48 +02003157static const MemoryRegionOps subpage_ops = {
3158 .read = subpage_read,
3159 .write = subpage_write,
3160 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003161};
3162
Avi Kivityde712f92012-01-02 12:41:07 +02003163static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3164 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003165{
3166 ram_addr_t raddr = addr;
3167 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003168 switch (size) {
3169 case 1: return ldub_p(ptr);
3170 case 2: return lduw_p(ptr);
3171 case 4: return ldl_p(ptr);
3172 default: abort();
3173 }
Andreas Färber56384e82011-11-30 16:26:21 +01003174}
3175
Avi Kivityde712f92012-01-02 12:41:07 +02003176static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3177 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003178{
3179 ram_addr_t raddr = addr;
3180 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003181 switch (size) {
3182 case 1: return stb_p(ptr, value);
3183 case 2: return stw_p(ptr, value);
3184 case 4: return stl_p(ptr, value);
3185 default: abort();
3186 }
Andreas Färber56384e82011-11-30 16:26:21 +01003187}
3188
Avi Kivityde712f92012-01-02 12:41:07 +02003189static const MemoryRegionOps subpage_ram_ops = {
3190 .read = subpage_ram_read,
3191 .write = subpage_ram_write,
3192 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003193};
3194
Anthony Liguoric227f092009-10-01 16:12:16 -05003195static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003196 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003197{
3198 int idx, eidx;
3199
3200 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3201 return -1;
3202 idx = SUBPAGE_IDX(start);
3203 eidx = SUBPAGE_IDX(end);
3204#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003205 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003206 __func__, mmio, start, end, idx, eidx, section);
3207#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003208 if (memory_region_is_ram(phys_sections[section].mr)) {
3209 MemoryRegionSection new_section = phys_sections[section];
3210 new_section.mr = &io_mem_subpage_ram;
3211 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003212 }
blueswir1db7b5422007-05-26 17:36:03 +00003213 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003214 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003215 }
3216
3217 return 0;
3218}
3219
Avi Kivity0f0cb162012-02-13 17:14:32 +02003220static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003221{
Anthony Liguoric227f092009-10-01 16:12:16 -05003222 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003223
Anthony Liguori7267c092011-08-20 22:09:37 -05003224 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003225
3226 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003227 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3228 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003229 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003230#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003231 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3232 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003233#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003234 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003235
3236 return mmio;
3237}
3238
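/*
 * Worked sketch (assumes 4 KiB target pages; addresses illustrative):
 * a 0x200-byte device at guest-physical 0x10000200 shares its page with
 * RAM.  subpage_init() seeds every sub_section[] entry with
 * phys_section_unassigned; subpage_register() then fills entries
 * 0x200..0x3ff with the device's section index, so subpage_read() and
 * subpage_write() re-dispatch each access by its offset within the page.
 */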
Avi Kivity5312bd82012-02-12 18:32:55 +02003239static uint16_t dummy_section(MemoryRegion *mr)
3240{
3241 MemoryRegionSection section = {
3242 .mr = mr,
3243 .offset_within_address_space = 0,
3244 .offset_within_region = 0,
3245 .size = UINT64_MAX,
3246 };
3247
3248 return phys_section_add(&section);
3249}
3250
Avi Kivity37ec01d2012-03-08 18:08:35 +02003251MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003252{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003253 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003254}
3255
Avi Kivitye9179ce2009-06-14 11:38:52 +03003256static void io_mem_init(void)
3257{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003258 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003259 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3260 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3261 "unassigned", UINT64_MAX);
3262 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3263 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003264 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3265 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003266 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3267 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003268}
3269
Avi Kivity50c1e142012-02-08 21:36:02 +02003270static void core_begin(MemoryListener *listener)
3271{
Avi Kivity54688b12012-02-09 17:34:32 +02003272 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003273 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003274 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003275 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003276 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3277 phys_section_rom = dummy_section(&io_mem_rom);
3278 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003279}
3280
3281static void core_commit(MemoryListener *listener)
3282{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003283 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003284
3285 /* since each CPU stores ram addresses in its TLB cache, we must
3286 reset the modified entries */
3287 /* XXX: slow ! */
3288 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3289 tlb_flush(env, 1);
3290 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003291}
3292
Avi Kivity93632742012-02-08 16:54:16 +02003293static void core_region_add(MemoryListener *listener,
3294 MemoryRegionSection *section)
3295{
Avi Kivity4855d412012-02-08 21:16:05 +02003296 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003297}
3298
3299static void core_region_del(MemoryListener *listener,
3300 MemoryRegionSection *section)
3301{
Avi Kivity93632742012-02-08 16:54:16 +02003302}
3303
Avi Kivity50c1e142012-02-08 21:36:02 +02003304static void core_region_nop(MemoryListener *listener,
3305 MemoryRegionSection *section)
3306{
Avi Kivity54688b12012-02-09 17:34:32 +02003307 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003308}
3309
Avi Kivity93632742012-02-08 16:54:16 +02003310static void core_log_start(MemoryListener *listener,
3311 MemoryRegionSection *section)
3312{
3313}
3314
3315static void core_log_stop(MemoryListener *listener,
3316 MemoryRegionSection *section)
3317{
3318}
3319
3320static void core_log_sync(MemoryListener *listener,
3321 MemoryRegionSection *section)
3322{
3323}
3324
3325static void core_log_global_start(MemoryListener *listener)
3326{
3327 cpu_physical_memory_set_dirty_tracking(1);
3328}
3329
3330static void core_log_global_stop(MemoryListener *listener)
3331{
3332 cpu_physical_memory_set_dirty_tracking(0);
3333}
3334
3335static void core_eventfd_add(MemoryListener *listener,
3336 MemoryRegionSection *section,
3337 bool match_data, uint64_t data, int fd)
3338{
3339}
3340
3341static void core_eventfd_del(MemoryListener *listener,
3342 MemoryRegionSection *section,
3343 bool match_data, uint64_t data, int fd)
3344{
3345}
3346
Avi Kivity50c1e142012-02-08 21:36:02 +02003347static void io_begin(MemoryListener *listener)
3348{
3349}
3350
3351static void io_commit(MemoryListener *listener)
3352{
3353}
3354
Avi Kivity4855d412012-02-08 21:16:05 +02003355static void io_region_add(MemoryListener *listener,
3356 MemoryRegionSection *section)
3357{
Avi Kivitya2d33522012-03-05 17:40:12 +02003358 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3359
3360 mrio->mr = section->mr;
3361 mrio->offset = section->offset_within_region;
3362 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003363 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003364 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003365}
3366
3367static void io_region_del(MemoryListener *listener,
3368 MemoryRegionSection *section)
3369{
3370 isa_unassign_ioport(section->offset_within_address_space, section->size);
3371}
3372
Avi Kivity50c1e142012-02-08 21:36:02 +02003373static void io_region_nop(MemoryListener *listener,
3374 MemoryRegionSection *section)
3375{
3376}
3377
Avi Kivity4855d412012-02-08 21:16:05 +02003378static void io_log_start(MemoryListener *listener,
3379 MemoryRegionSection *section)
3380{
3381}
3382
3383static void io_log_stop(MemoryListener *listener,
3384 MemoryRegionSection *section)
3385{
3386}
3387
3388static void io_log_sync(MemoryListener *listener,
3389 MemoryRegionSection *section)
3390{
3391}
3392
3393static void io_log_global_start(MemoryListener *listener)
3394{
3395}
3396
3397static void io_log_global_stop(MemoryListener *listener)
3398{
3399}
3400
3401static void io_eventfd_add(MemoryListener *listener,
3402 MemoryRegionSection *section,
3403 bool match_data, uint64_t data, int fd)
3404{
3405}
3406
3407static void io_eventfd_del(MemoryListener *listener,
3408 MemoryRegionSection *section,
3409 bool match_data, uint64_t data, int fd)
3410{
3411}
3412
Avi Kivity93632742012-02-08 16:54:16 +02003413static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003414 .begin = core_begin,
3415 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003416 .region_add = core_region_add,
3417 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003418 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003419 .log_start = core_log_start,
3420 .log_stop = core_log_stop,
3421 .log_sync = core_log_sync,
3422 .log_global_start = core_log_global_start,
3423 .log_global_stop = core_log_global_stop,
3424 .eventfd_add = core_eventfd_add,
3425 .eventfd_del = core_eventfd_del,
3426 .priority = 0,
3427};
3428
Avi Kivity4855d412012-02-08 21:16:05 +02003429static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003430 .begin = io_begin,
3431 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003432 .region_add = io_region_add,
3433 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003434 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003435 .log_start = io_log_start,
3436 .log_stop = io_log_stop,
3437 .log_sync = io_log_sync,
3438 .log_global_start = io_log_global_start,
3439 .log_global_stop = io_log_global_stop,
3440 .eventfd_add = io_eventfd_add,
3441 .eventfd_del = io_eventfd_del,
3442 .priority = 0,
3443};
3444
Avi Kivity62152b82011-07-26 14:26:14 +03003445static void memory_map_init(void)
3446{
Anthony Liguori7267c092011-08-20 22:09:37 -05003447 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003448 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003449 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003450
Anthony Liguori7267c092011-08-20 22:09:37 -05003451 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003452 memory_region_init(system_io, "io", 65536);
3453 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003454
Avi Kivity4855d412012-02-08 21:16:05 +02003455 memory_listener_register(&core_memory_listener, system_memory);
3456 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003457}
3458
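/*
 * Other subsystems hook these same address spaces by registering their
 * own MemoryListener (a hedged sketch; 'my_listener' and its callbacks
 * are hypothetical).  Note that every callback slot appears to be
 * required in this version, as the long list of empty core_ and io_
 * stubs above suggests:
 *
 *     static MemoryListener my_listener = {
 *         .begin = my_begin,
 *         // ... all remaining callbacks, even if empty ...
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, system_memory);
 */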
3459MemoryRegion *get_system_memory(void)
3460{
3461 return system_memory;
3462}
3463
Avi Kivity309cb472011-08-08 16:09:03 +03003464MemoryRegion *get_system_io(void)
3465{
3466 return system_io;
3467}
3468
pbrooke2eef172008-06-08 01:09:01 +00003469#endif /* !defined(CONFIG_USER_ONLY) */
3470
bellard13eb76e2004-01-24 15:23:36 +00003471/* physical memory access (slow version, mainly for debug) */
3472#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003473int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003474 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003475{
3476 int l, flags;
3477 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003478 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003479
3480 while (len > 0) {
3481 page = addr & TARGET_PAGE_MASK;
3482 l = (page + TARGET_PAGE_SIZE) - addr;
3483 if (l > len)
3484 l = len;
3485 flags = page_get_flags(page);
3486 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003487 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003488 if (is_write) {
3489 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003490 return -1;
bellard579a97f2007-11-11 14:26:47 +00003491 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003492 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003493 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003494 memcpy(p, buf, l);
3495 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003496 } else {
3497 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003498 return -1;
bellard579a97f2007-11-11 14:26:47 +00003499 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003500 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003501 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003502 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003503 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003504 }
3505 len -= l;
3506 buf += l;
3507 addr += l;
3508 }
Paul Brooka68fe892010-03-01 00:08:59 +00003509 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003510}
bellard8df1cd02005-01-28 22:37:22 +00003511
bellard13eb76e2004-01-24 15:23:36 +00003512#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003513void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003514 int len, int is_write)
3515{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003516 int l;
bellard13eb76e2004-01-24 15:23:36 +00003517 uint8_t *ptr;
3518 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003519 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003520 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003521
bellard13eb76e2004-01-24 15:23:36 +00003522 while (len > 0) {
3523 page = addr & TARGET_PAGE_MASK;
3524 l = (page + TARGET_PAGE_SIZE) - addr;
3525 if (l > len)
3526 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003527 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003528
bellard13eb76e2004-01-24 15:23:36 +00003529 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003530 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003531 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003532 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003533 /* XXX: could force cpu_single_env to NULL to avoid
3534 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003535 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003536 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003537 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003538 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003539 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003540 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003541 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003542 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003543 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003544 l = 2;
3545 } else {
bellard1c213d12005-09-03 10:49:04 +00003546 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003547 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003548 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003549 l = 1;
3550 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003551 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003552 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003553 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003554 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003555 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003556 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003557 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003558 if (!cpu_physical_memory_is_dirty(addr1)) {
3559 /* invalidate code */
3560 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3561 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003562 cpu_physical_memory_set_dirty_flags(
3563 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003564 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003565 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003566 }
3567 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003568 if (!(memory_region_is_ram(section->mr) ||
3569 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003570 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003571 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003572 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003573 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003574 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003575 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003576 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003577 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003578 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003579 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003580 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003581 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003582 l = 2;
3583 } else {
bellard1c213d12005-09-03 10:49:04 +00003584 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003585 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003586 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003587 l = 1;
3588 }
3589 } else {
3590 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003591 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003592 + memory_region_section_addr(section,
3593 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003594 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003595 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003596 }
3597 }
3598 len -= l;
3599 buf += l;
3600 addr += l;
3601 }
3602}
bellard8df1cd02005-01-28 22:37:22 +00003603
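/*
 * Usage sketch (not part of the build): the common read/write helpers
 * wrap cpu_physical_memory_rw() with a fixed is_write argument:
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_read(addr, buf, 4);     // is_write == 0
 *     stl_p(buf, 0xdeadbeef);
 *     cpu_physical_memory_write(addr, buf, 4);    // is_write == 1
 */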
bellardd0ecd2a2006-04-23 17:14:48 +00003604/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003605void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003606 const uint8_t *buf, int len)
3607{
3608 int l;
3609 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003610 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003611 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003612
bellardd0ecd2a2006-04-23 17:14:48 +00003613 while (len > 0) {
3614 page = addr & TARGET_PAGE_MASK;
3615 l = (page + TARGET_PAGE_SIZE) - addr;
3616 if (l > len)
3617 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003618 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003619
Blue Swirlcc5bea62012-04-14 14:56:48 +00003620 if (!(memory_region_is_ram(section->mr) ||
3621 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003622 /* do nothing */
3623 } else {
3624 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003625 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003626 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003627 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003628 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003629 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003630 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003631 }
3632 len -= l;
3633 buf += l;
3634 addr += l;
3635 }
3636}
3637
aliguori6d16c2f2009-01-22 16:59:11 +00003638typedef struct {
3639 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003640 target_phys_addr_t addr;
3641 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003642} BounceBuffer;
3643
3644static BounceBuffer bounce;
3645
aliguoriba223c22009-01-22 16:59:16 +00003646typedef struct MapClient {
3647 void *opaque;
3648 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003649 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003650} MapClient;
3651
Blue Swirl72cf2d42009-09-12 07:36:22 +00003652static QLIST_HEAD(map_client_list, MapClient) map_client_list
3653 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003654
3655void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3656{
Anthony Liguori7267c092011-08-20 22:09:37 -05003657 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003658
3659 client->opaque = opaque;
3660 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003661 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003662 return client;
3663}
3664
3665void cpu_unregister_map_client(void *_client)
3666{
3667 MapClient *client = (MapClient *)_client;
3668
Blue Swirl72cf2d42009-09-12 07:36:22 +00003669 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003670 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003671}
3672
3673static void cpu_notify_map_clients(void)
3674{
3675 MapClient *client;
3676
Blue Swirl72cf2d42009-09-12 07:36:22 +00003677 while (!QLIST_EMPTY(&map_client_list)) {
3678 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003679 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003680 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003681 }
3682}
3683
aliguori6d16c2f2009-01-22 16:59:11 +00003684/* Map a physical memory region into a host virtual address.
3685 * May map a subset of the requested range, given by and returned in *plen.
3686 * May return NULL if resources needed to perform the mapping are exhausted.
3687 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003688 * Use cpu_register_map_client() to know when retrying the map operation is
3689 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003690 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003691void *cpu_physical_memory_map(target_phys_addr_t addr,
3692 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003693 int is_write)
3694{
Anthony Liguoric227f092009-10-01 16:12:16 -05003695 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003696 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003697 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003698 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003699 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003700 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003701 ram_addr_t rlen;
3702 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003703
3704 while (len > 0) {
3705 page = addr & TARGET_PAGE_MASK;
3706 l = (page + TARGET_PAGE_SIZE) - addr;
3707 if (l > len)
3708 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003709 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003710
Avi Kivityf3705d52012-03-08 16:16:34 +02003711 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003712 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003713 break;
3714 }
3715 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3716 bounce.addr = addr;
3717 bounce.len = l;
3718 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003719 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003720 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003721
3722 *plen = l;
3723 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003724 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003725 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003726 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003727 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003728 }
aliguori6d16c2f2009-01-22 16:59:11 +00003729
3730 len -= l;
3731 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003732 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003733 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003734 rlen = todo;
3735 ret = qemu_ram_ptr_length(raddr, &rlen);
3736 *plen = rlen;
3737 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003738}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
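
/* Usage sketch (hypothetical caller, not part of the original file): a
 * typical zero-copy DMA round trip through the two functions above. Note
 * that the map may come back shorter than requested (page-crossing into
 * non-RAM, or the single global bounce buffer), so a careful caller loops
 * until the whole range is consumed and tolerates a NULL return. */
static void example_dma_write(target_phys_addr_t sg_addr,
                              target_phys_addr_t sg_len,
                              const uint8_t *data)
{
    while (sg_len > 0) {
        target_phys_addr_t plen = sg_len;
        void *host = cpu_physical_memory_map(sg_addr, &plen, 1);
        if (!host) {
            break;              /* resources exhausted; retry later */
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        sg_addr += plen;
        data += plen;
        sg_len -= plen;
    }
}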

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
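
/* Illustrative sketch (hypothetical device, not from this file): a
 * PCI-style device whose registers are defined as little-endian regardless
 * of the guest CPU uses the fixed-endian accessor, so its model stays
 * correct on TARGET_WORDS_BIGENDIAN builds without open-coding bswap32. */
static uint32_t example_read_le_config_dword(target_phys_addr_t cfg_base,
                                             unsigned reg)
{
    /* ldl_le_phys byte-swaps as needed inside ldl_phys_internal */
    return ldl_le_phys(cfg_base + reg * 4);
}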

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
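
/* Sketch only, not code from this file: one way the XXX in
 * ldq_phys_internal could be addressed. A 64-bit I/O access is split into
 * two 32-bit words; which half sits at the lower address is determined by
 * the device's declared endianness rather than unconditionally by the
 * target's, mirroring what the 32-bit path does with bswap32. */
static inline uint64_t example_ldq_io_endian(MemoryRegion *mr,
                                             target_phys_addr_t addr,
                                             enum device_endian endian)
{
    uint64_t w0 = io_mem_read(mr, addr, 4);      /* word at lower address */
    uint64_t w1 = io_mem_read(mr, addr + 4, 4);  /* word at higher address */

    switch (endian) {
    case DEVICE_LITTLE_ENDIAN:
        return (w1 << 32) | w0;                  /* low word comes first */
    case DEVICE_BIG_ENDIAN:
        return (w0 << 32) | w1;                  /* high word comes first */
    default:
        /* DEVICE_NATIVE_ENDIAN: follow the target's byte order */
#ifdef TARGET_WORDS_BIGENDIAN
        return (w0 << 32) | w1;
#else
        return (w1 << 32) | w0;
#endif
    }
}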

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
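
/* Illustrative sketch (hypothetical target code, not from this file): why
 * the _notdirty variant exists. When a softmmu target sets the dirty bit
 * in a guest page table entry during a TLB fill, a plain stl_phys would
 * dirty the page and trigger TB-invalidation bookkeeping; stl_phys_notdirty
 * skips that, which is exactly what you want when the dirty bitmap itself
 * is being used to track modified PTEs. */
static void example_set_pte_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x40;                        /* hypothetical PTE dirty bit */
    stl_phys_notdirty(pte_addr, pte);
}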

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
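
/* Usage sketch (hypothetical device model, not from this file): posting a
 * 32-bit completion flag into guest memory. Going through stl_le_phys
 * rather than a raw host pointer keeps the dirty bitmap and the
 * translated-code invalidation above correct even if the guest executes
 * code from the same page. */
static void example_post_completion(target_phys_addr_t status_addr)
{
    stl_le_phys(status_addr, 1);    /* 1 = done, in guest-visible LE */
}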

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
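
/* Illustrative sketch (hypothetical descriptor layout, not from this file):
 * storing a 64-bit guest-physical pointer into a field that a device spec
 * defines as little-endian on every host/guest combination; cpu_to_le64()
 * happens inside stq_le_phys. */
static void example_store_le_desc_addr(target_phys_addr_t desc,
                                       uint64_t buf_gpa)
{
    stq_le_phys(desc, buf_gpa);
}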

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
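
/* Usage sketch (hypothetical debugger hook, not from this file): how a
 * gdb-stub-like caller reads and patches guest virtual memory. The function
 * above walks the guest page tables via cpu_get_phys_page_debug() and
 * writes through to ROM, which is what breakpoint insertion needs. The
 * opcode below is a made-up placeholder. */
static int example_insert_sw_breakpoint(CPUArchState *env, target_ulong pc,
                                        uint8_t *saved_insn)
{
    static uint8_t bp_insn[1] = { 0xcc };         /* hypothetical opcode */

    if (cpu_memory_rw_debug(env, pc, saved_insn, 1, 0) < 0) {
        return -1;                                /* page not mapped */
    }
    return cpu_memory_rw_debug(env, pc, bp_insn, 1, 1);
}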

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
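
/* Usage sketch (not from this file): these statistics are what the monitor
 * surfaces as "info jit"; since the hook is just a FILE* plus a
 * printf-style callback, plain fprintf also works for ad-hoc debugging. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}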

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
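
/* Usage sketch (hypothetical caller, not from this file): a guest-memory
 * dumper can use the predicate above to skip MMIO ranges, where a read
 * would trigger device side effects instead of returning stable RAM
 * contents. */
static bool example_safe_to_dump(target_phys_addr_t addr)
{
    return !cpu_physical_memory_is_io(addr);
}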
#endif