/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
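/* Illustrative example only (the actual values depend on host and
   target configuration): with L1_MAP_ADDR_SPACE_BITS = 32,
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, a page index has 20
   significant bits, V_L1_BITS_REM = 0 so V_L1_BITS becomes 10,
   V_L1_SIZE = 1024 and V_L1_SHIFT = 10: the top 10 bits of the page
   index select an l1_map entry and the low 10 bits select the
   PageDesc within a single L2 table. */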

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf;   /* index into phys_sections */
        uint16_t node;   /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level holds uint16_t indexes into phys_sections rather
   than pointers; phys_page_find() reconstructs a PhysPageDesc from
   the section on lookup.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

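/* Mark a host memory range as executable: the translation buffer and
   prologue are written at run time and then jumped to.  Win32 uses
   VirtualProtect; elsewhere the range is rounded out to host page
   boundaries and mprotect()ed read/write/execute. */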
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

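/* Determine the host/target page size relationship and, for BSD user
   mode, mark every address range the host has already mapped as
   PAGE_RESERVED so the guest will not map over it. */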
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

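/* Walk the multi-level l1_map for a target page index and return its
   PageDesc, allocating intermediate tables and the leaf on the way
   down when 'alloc' is non-zero, or NULL as soon as a level is
   missing. */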
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

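/* Allocate one L2_SIZE-entry node from the growable phys_map_nodes
   array and store its index through 'ptr'.  The index is written
   before g_renew() runs so the caller's slot cannot be invalidated by
   the reallocation; all new entries start out as PHYS_MAP_NODE_NIL. */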
static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
{
    unsigned i;
    uint16_t ret;

    /* Assign early to avoid the pointer being invalidated by g_renew() */
    *ptr = ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    if (ret == phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return phys_map_nodes[ret];
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

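/* Walk the phys_map radix tree for a physical page index.  Returns a
   pointer to the leaf's uint16_t section index, or NULL if the path
   is not populated and 'alloc' is zero.  Newly allocated bottom-level
   entries are initialized to phys_section_unassigned. */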
static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N. */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == PHYS_MAP_NODE_NIL) {
            if (!alloc) {
                return NULL;
            }
            p = phys_map_node_alloc(&lp->u.node);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        } else {
            p = phys_map_nodes[lp->u.node];
        }
        lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}

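/* Look up the MemoryRegionSection covering a physical page and fold
   it back into the legacy PhysPageDesc form: a phys_offset (ram
   address or io index) plus an offset within the region.  Unmapped
   pages resolve to phys_section_unassigned. */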
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    uint16_t *p = phys_page_find_alloc(index, 0);
    uint16_t s_index = phys_section_unassigned;
    MemoryRegionSection *section;
    PhysPageDesc pd;

    if (p) {
        s_index = *p;
    }
    section = &phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section->offset_within_address_space <= index
           && index <= section->offset_within_address_space + section->size-1);
    pd.phys_offset = section->mr->ram_addr;
    pd.region_offset = (index - section->offset_within_address_space)
        + section->offset_within_region;
    if (memory_region_is_ram(section->mr)) {
        pd.phys_offset += pd.region_offset;
        pd.region_offset = 0;
    } else if (section->mr->rom_device) {
        pd.phys_offset += pd.region_offset;
    }
    if (section->readonly) {
        pd.phys_offset |= io_mem_rom.ram_addr;
    }
    return pd;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

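/* Set up the buffer that translated code is generated into: either
   the static buffer above, or an anonymous RWX mmap whose placement
   is constrained per host so that direct calls and branches out of
   generated code can reach the prologue and other blocks. */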
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

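/* Linear search of the singly linked first_cpu list by cpu_index;
   returns NULL when no CPU with that index exists. */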
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

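/* Drop the SMC bitmap for a page and reset its write-lookup counter;
   called whenever the set of TBs on the page changes. */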
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

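/* Remove a TB from a page's TB list.  The low two bits of each list
   pointer encode which of the TB's (up to two) pages the link belongs
   to, so pointers are masked with ~3 before being dereferenced. */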
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

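/* Unlink jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same target; the entry whose low bits are 2 marks the
   target TB's own jmp_first anchor. */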
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

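/* Set 'len' bits starting at bit 'start' in the byte array 'tab',
   handling the partial first and last bytes with masks and filling
   whole bytes with 0xff in between. */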
975static inline void set_bits(uint8_t *tab, int start, int len)
976{
977 int end, mask, end1;
978
979 end = start + len;
980 tab += start >> 3;
981 mask = 0xff << (start & 7);
982 if ((start & ~7) == (end & ~7)) {
983 if (start < end) {
984 mask &= ~(0xff << (end & 7));
985 *tab |= mask;
986 }
987 } else {
988 *tab++ |= mask;
989 start = (start + 8) & ~7;
990 end1 = end & ~7;
991 while (start < end1) {
992 *tab++ = 0xff;
993 start += 8;
994 }
995 if (start < end) {
996 mask = ~(0xff << (end & 7));
997 *tab |= mask;
998 }
999 }
1000}
1001
1002static void build_page_bitmap(PageDesc *p)
1003{
1004 int n, tb_start, tb_end;
1005 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00001006
Anthony Liguori7267c092011-08-20 22:09:37 -05001007 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +00001008
1009 tb = p->first_tb;
1010 while (tb != NULL) {
1011 n = (long)tb & 3;
1012 tb = (TranslationBlock *)((long)tb & ~3);
1013 /* NOTE: this is subtle as a TB may span two physical pages */
1014 if (n == 0) {
1015 /* NOTE: tb_end may be after the end of the page, but
1016 it is not a problem */
1017 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1018 tb_end = tb_start + tb->size;
1019 if (tb_end > TARGET_PAGE_SIZE)
1020 tb_end = TARGET_PAGE_SIZE;
1021 } else {
1022 tb_start = 0;
1023 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1024 }
1025 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1026 tb = tb->page_next[n];
1027 }
1028}
1029
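/* Translate one block starting at 'pc'.  If the code buffer is full,
   the whole translation cache is flushed and the allocation retried,
   which cannot fail a second time since the buffer is then empty. */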
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
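/* User-mode variant: a write fault hit this page, so drop every TB on
   it; 'pc' (when non-zero) identifies the faulting TB so that
   self-modifying code can be restarted via cpu_resume_from_signal(). */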
Paul Brook41c1b1c2010-03-12 16:54:58 +00001206static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001207 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001208{
aliguori6b917542008-11-18 19:46:41 +00001209 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001210 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001211 int n;
bellardd720b932004-04-25 17:57:43 +00001212#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001213 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001214 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001215 int current_tb_modified = 0;
1216 target_ulong current_pc = 0;
1217 target_ulong current_cs_base = 0;
1218 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001219#endif
bellard9fa3e852004-01-04 18:06:42 +00001220
1221 addr &= TARGET_PAGE_MASK;
1222 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001223 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001224 return;
1225 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001226#ifdef TARGET_HAS_PRECISE_SMC
1227 if (tb && pc != 0) {
1228 current_tb = tb_find_pc(pc);
1229 }
1230#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001231 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001232 n = (long)tb & 3;
1233 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001234#ifdef TARGET_HAS_PRECISE_SMC
1235 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001236 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001237 /* If we are modifying the current TB, we must stop
1238 its execution. We could be more precise by checking
1239 that the modification is after the current PC, but it
1240 would require a specialized function to partially
1241 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001242
bellardd720b932004-04-25 17:57:43 +00001243 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001244 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001245 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1246 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001247 }
1248#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001249 tb_phys_invalidate(tb, addr);
1250 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001251 }
1252 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001253#ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb_modified) {
1255 /* we generate a block containing just the instruction
1256 modifying the memory. It will ensure that it cannot modify
1257 itself */
bellardea1c1802004-06-14 18:56:36 +00001258 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001259 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001260 cpu_resume_from_signal(env, puc);
1261 }
1262#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001263}
bellard9fa3e852004-01-04 18:06:42 +00001264#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001265
1266/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001267static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001268 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001269{
1270 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001271#ifndef CONFIG_USER_ONLY
1272 bool page_already_protected;
1273#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001274
bellard9fa3e852004-01-04 18:06:42 +00001275 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001276 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001277 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001278#ifndef CONFIG_USER_ONLY
1279 page_already_protected = p->first_tb != NULL;
1280#endif
bellard9fa3e852004-01-04 18:06:42 +00001281 p->first_tb = (TranslationBlock *)((long)tb | n);
1282 invalidate_page_bitmap(p);
1283
bellard107db442004-06-22 18:48:46 +00001284#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001285
bellard9fa3e852004-01-04 18:06:42 +00001286#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001287 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001288 target_ulong addr;
1289 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001290 int prot;
1291
bellardfd6ce8f2003-05-14 19:00:11 +00001292        /* force the host page to be non-writable (writes will incur a
1293           page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001294 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001295 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001296 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1297 addr += TARGET_PAGE_SIZE) {
1298
1299 p2 = page_find (addr >> TARGET_PAGE_BITS);
1300 if (!p2)
1301 continue;
1302 prot |= p2->flags;
1303 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001304 }
ths5fafdf22007-09-16 21:08:06 +00001305 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001306 (prot & PAGE_BITS) & ~PAGE_WRITE);
1307#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001308 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001309 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001310#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001311 }
bellard9fa3e852004-01-04 18:06:42 +00001312#else
1313 /* if some code is already present, then the pages are already
1314 protected. So we handle the case where only the first TB is
1315 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001316 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001317 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001318 }
1319#endif
bellardd720b932004-04-25 17:57:43 +00001320
1321#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001322}
1323
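/* Editor's note, worked example: with 64 KB host pages and 4 KB target
   pages, write-protecting a single target page must mprotect() the whole
   host page, so the loop above ORs together the flags of all 16 target
   pages it contains and only PAGE_WRITE is dropped from the result. */
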
bellard9fa3e852004-01-04 18:06:42 +00001324/* add a new TB and link it to the physical page tables. phys_page2 is
1325 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001326void tb_link_page(TranslationBlock *tb,
1327 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001328{
bellard9fa3e852004-01-04 18:06:42 +00001329 unsigned int h;
1330 TranslationBlock **ptb;
1331
pbrookc8a706f2008-06-02 16:16:42 +00001332 /* Grab the mmap lock to stop another thread invalidating this TB
1333 before we are done. */
1334 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001335 /* add in the physical hash table */
1336 h = tb_phys_hash_func(phys_pc);
1337 ptb = &tb_phys_hash[h];
1338 tb->phys_hash_next = *ptb;
1339 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001340
1341 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001342 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1343 if (phys_page2 != -1)
1344 tb_alloc_page(tb, 1, phys_page2);
1345 else
1346 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001347
bellardd4e81642003-05-25 16:46:15 +00001348 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1349 tb->jmp_next[0] = NULL;
1350 tb->jmp_next[1] = NULL;
1351
1352 /* init original jump addresses */
1353 if (tb->tb_next_offset[0] != 0xffff)
1354 tb_reset_jump(tb, 0);
1355 if (tb->tb_next_offset[1] != 0xffff)
1356 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001357
1358#ifdef DEBUG_TB_CHECK
1359 tb_page_check();
1360#endif
pbrookc8a706f2008-06-02 16:16:42 +00001361 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001362}
1363
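/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): a TB whose translated code crosses a target page
   boundary is registered on both pages, so a write to either page
   invalidates it. */
#if 0
static void example_link_tb(TranslationBlock *tb, tb_page_addr_t phys_pc,
                            int guest_code_size)
{
    tb_page_addr_t phys_page2 = -1;

    /* if the guest code spills into the next page, register that page
       as well */
    if (((phys_pc + guest_code_size - 1) & TARGET_PAGE_MASK)
        != (phys_pc & TARGET_PAGE_MASK)) {
        phys_page2 = (phys_pc + guest_code_size - 1) & TARGET_PAGE_MASK;
    }
    tb_link_page(tb, phys_pc & TARGET_PAGE_MASK, phys_page2);
}
#endif
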
bellarda513fe12003-05-27 23:29:48 +00001364/* find the TB such that tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr.
1365   Return NULL if not found */
1366TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1367{
1368 int m_min, m_max, m;
1369 unsigned long v;
1370 TranslationBlock *tb;
1371
1372 if (nb_tbs <= 0)
1373 return NULL;
1374 if (tc_ptr < (unsigned long)code_gen_buffer ||
1375 tc_ptr >= (unsigned long)code_gen_ptr)
1376 return NULL;
1377 /* binary search (cf Knuth) */
1378 m_min = 0;
1379 m_max = nb_tbs - 1;
1380 while (m_min <= m_max) {
1381 m = (m_min + m_max) >> 1;
1382 tb = &tbs[m];
1383 v = (unsigned long)tb->tc_ptr;
1384 if (v == tc_ptr)
1385 return tb;
1386 else if (tc_ptr < v) {
1387 m_max = m - 1;
1388 } else {
1389 m_min = m + 1;
1390 }
ths5fafdf22007-09-16 21:08:06 +00001391 }
bellarda513fe12003-05-27 23:29:48 +00001392 return &tbs[m_max];
1393}
bellard75012672003-06-21 13:11:07 +00001394
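/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): tb_find_pc() is typically fed a host PC taken from a
   signal context; a NULL result means the fault was not raised from
   generated code. */
#if 0
static int example_pc_is_translated(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    return tb != NULL;  /* host_pc falls inside generated code */
}
#endif
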
bellardea041c02003-06-25 16:16:50 +00001395static void tb_reset_jump_recursive(TranslationBlock *tb);
1396
1397static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1398{
1399 TranslationBlock *tb1, *tb_next, **ptb;
1400 unsigned int n1;
1401
1402 tb1 = tb->jmp_next[n];
1403 if (tb1 != NULL) {
1404 /* find head of list */
1405 for(;;) {
1406 n1 = (long)tb1 & 3;
1407 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1408 if (n1 == 2)
1409 break;
1410 tb1 = tb1->jmp_next[n1];
1411 }
1412        /* we are now sure that tb jumps to tb1 */
1413 tb_next = tb1;
1414
1415 /* remove tb from the jmp_first list */
1416 ptb = &tb_next->jmp_first;
1417 for(;;) {
1418 tb1 = *ptb;
1419 n1 = (long)tb1 & 3;
1420 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1421 if (n1 == n && tb1 == tb)
1422 break;
1423 ptb = &tb1->jmp_next[n1];
1424 }
1425 *ptb = tb->jmp_next[n];
1426 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001427
bellardea041c02003-06-25 16:16:50 +00001428 /* suppress the jump to next tb in generated code */
1429 tb_reset_jump(tb, n);
1430
bellard01243112004-01-04 15:48:17 +00001431        /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001432 tb_reset_jump_recursive(tb_next);
1433 }
1434}
1435
1436static void tb_reset_jump_recursive(TranslationBlock *tb)
1437{
1438 tb_reset_jump_recursive2(tb, 0);
1439 tb_reset_jump_recursive2(tb, 1);
1440}
1441
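/* Editor's sketch of the tagged-pointer convention used by the list
   walks above (the example_* helpers are hypothetical): TBs are at
   least 4-byte aligned, so the low two bits of a jmp_next/page_next
   pointer encode the slot index, and the value 2 marks the list head. */
#if 0
static inline TranslationBlock *example_tb_decode(TranslationBlock *tagged,
                                                  unsigned int *n)
{
    *n = (long)tagged & 3;                           /* slot index or 2 */
    return (TranslationBlock *)((long)tagged & ~3);  /* real pointer */
}

static inline TranslationBlock *example_tb_encode(TranslationBlock *tb,
                                                  unsigned int n)
{
    return (TranslationBlock *)((long)tb | n);
}
#endif
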
bellard1fddef42005-04-17 19:16:13 +00001442#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001443#if defined(CONFIG_USER_ONLY)
1444static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1445{
1446 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1447}
1448#else
bellardd720b932004-04-25 17:57:43 +00001449static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1450{
Anthony Liguoric227f092009-10-01 16:12:16 -05001451 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001452 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001453 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001454 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001455
pbrookc2f07f82006-04-08 17:14:56 +00001456 addr = cpu_get_phys_page_debug(env, pc);
1457 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001458 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001459 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001460 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001461}
bellardc27004e2005-01-03 23:35:10 +00001462#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001463#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001464
Paul Brookc527ee82010-03-01 03:31:14 +00001465#if defined(CONFIG_USER_ONLY)
1466void cpu_watchpoint_remove_all(CPUState *env, int mask)
1467
1468{
1469}
1470
1471int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags, CPUWatchpoint **watchpoint)
1473{
1474 return -ENOSYS;
1475}
1476#else
pbrook6658ffb2007-03-16 23:58:11 +00001477/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001478int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1479 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001480{
aliguorib4051332008-11-18 20:14:20 +00001481 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001482 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001483
aliguorib4051332008-11-18 20:14:20 +00001484 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1485 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1486 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1487 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1488 return -EINVAL;
1489 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001490 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001491
aliguoria1d1bb32008-11-18 20:07:32 +00001492 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001493 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001494 wp->flags = flags;
1495
aliguori2dc9f412008-11-18 20:56:59 +00001496 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001497 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001498 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001499 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001500 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001501
pbrook6658ffb2007-03-16 23:58:11 +00001502 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001503
1504 if (watchpoint)
1505 *watchpoint = wp;
1506 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001507}
1508
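/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): watch an 8-byte, naturally aligned guest variable for
   writes; the length/alignment check above rejects anything else. */
#if 0
static int example_watch_u64(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, addr, 8, BP_MEM_WRITE, &wp);
}
#endif
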
aliguoria1d1bb32008-11-18 20:07:32 +00001509/* Remove a specific watchpoint. */
1510int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1511 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001512{
aliguorib4051332008-11-18 20:14:20 +00001513 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001514 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001515
Blue Swirl72cf2d42009-09-12 07:36:22 +00001516 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001517 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001518 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001519 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001520 return 0;
1521 }
1522 }
aliguoria1d1bb32008-11-18 20:07:32 +00001523 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001524}
1525
aliguoria1d1bb32008-11-18 20:07:32 +00001526/* Remove a specific watchpoint by reference. */
1527void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1528{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001529 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001530
aliguoria1d1bb32008-11-18 20:07:32 +00001531 tlb_flush_page(env, watchpoint->vaddr);
1532
Anthony Liguori7267c092011-08-20 22:09:37 -05001533 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001534}
1535
aliguoria1d1bb32008-11-18 20:07:32 +00001536/* Remove all matching watchpoints. */
1537void cpu_watchpoint_remove_all(CPUState *env, int mask)
1538{
aliguoric0ce9982008-11-25 22:13:57 +00001539 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001540
Blue Swirl72cf2d42009-09-12 07:36:22 +00001541 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001542 if (wp->flags & mask)
1543 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001544 }
aliguoria1d1bb32008-11-18 20:07:32 +00001545}
Paul Brookc527ee82010-03-01 03:31:14 +00001546#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001547
1548/* Add a breakpoint. */
1549int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1550 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001551{
bellard1fddef42005-04-17 19:16:13 +00001552#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001553 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001554
Anthony Liguori7267c092011-08-20 22:09:37 -05001555 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001556
1557 bp->pc = pc;
1558 bp->flags = flags;
1559
aliguori2dc9f412008-11-18 20:56:59 +00001560 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001561 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001562 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001563 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001564 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001565
1566 breakpoint_invalidate(env, pc);
1567
1568 if (breakpoint)
1569 *breakpoint = bp;
1570 return 0;
1571#else
1572 return -ENOSYS;
1573#endif
1574}
1575
1576/* Remove a specific breakpoint. */
1577int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1578{
1579#if defined(TARGET_HAS_ICE)
1580 CPUBreakpoint *bp;
1581
Blue Swirl72cf2d42009-09-12 07:36:22 +00001582 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001583 if (bp->pc == pc && bp->flags == flags) {
1584 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001585 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001586 }
bellard4c3a88a2003-07-26 12:06:08 +00001587 }
aliguoria1d1bb32008-11-18 20:07:32 +00001588 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001589#else
aliguoria1d1bb32008-11-18 20:07:32 +00001590 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001591#endif
1592}
1593
aliguoria1d1bb32008-11-18 20:07:32 +00001594/* Remove a specific breakpoint by reference. */
1595void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001596{
bellard1fddef42005-04-17 19:16:13 +00001597#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001598 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001599
aliguoria1d1bb32008-11-18 20:07:32 +00001600 breakpoint_invalidate(env, breakpoint->pc);
1601
Anthony Liguori7267c092011-08-20 22:09:37 -05001602 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001603#endif
1604}
1605
1606/* Remove all matching breakpoints. */
1607void cpu_breakpoint_remove_all(CPUState *env, int mask)
1608{
1609#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001610 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001611
Blue Swirl72cf2d42009-09-12 07:36:22 +00001612 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001613 if (bp->flags & mask)
1614 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001615 }
bellard4c3a88a2003-07-26 12:06:08 +00001616#endif
1617}
1618
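/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): the typical gdbstub flow inserts BP_GDB breakpoints
   while the guest is being debugged and drops them all on detach. */
#if 0
static void example_gdb_breakpoint_cycle(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... resume the guest; hitting pc raises EXCP_DEBUG ... */
    }
    cpu_breakpoint_remove_all(env, BP_GDB);
}
#endif
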
bellardc33a3462003-07-29 20:50:33 +00001619/* enable or disable single step mode. EXCP_DEBUG is returned by the
1620 CPU loop after each instruction */
1621void cpu_single_step(CPUState *env, int enabled)
1622{
bellard1fddef42005-04-17 19:16:13 +00001623#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001624 if (env->singlestep_enabled != enabled) {
1625 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001626 if (kvm_enabled())
1627 kvm_update_guest_debug(env, 0);
1628 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001629 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001630 /* XXX: only flush what is necessary */
1631 tb_flush(env);
1632 }
bellardc33a3462003-07-29 20:50:33 +00001633 }
1634#endif
1635}
1636
bellard34865132003-10-05 14:28:56 +00001637/* enable or disable low-level logging */
1638void cpu_set_log(int log_flags)
1639{
1640 loglevel = log_flags;
1641 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001642 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001643 if (!logfile) {
1644 perror(logfilename);
1645 _exit(1);
1646 }
bellard9fa3e852004-01-04 18:06:42 +00001647#if !defined(CONFIG_SOFTMMU)
1648        /* must avoid glibc's internal use of mmap() by setting a buffer "by hand" */
1649 {
blueswir1b55266b2008-09-20 08:07:15 +00001650 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001651 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1652 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001653#elif defined(_WIN32)
1654 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1655 setvbuf(logfile, NULL, _IONBF, 0);
1656#else
bellard34865132003-10-05 14:28:56 +00001657 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001658#endif
pbrooke735b912007-06-30 13:53:24 +00001659 log_append = 1;
1660 }
1661 if (!loglevel && logfile) {
1662 fclose(logfile);
1663 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001664 }
1665}
1666
1667void cpu_set_log_filename(const char *filename)
1668{
1669 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001670 if (logfile) {
1671 fclose(logfile);
1672 logfile = NULL;
1673 }
1674 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001675}
bellardc33a3462003-07-29 20:50:33 +00001676
aurel323098dba2009-03-07 21:28:24 +00001677static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001678{
pbrookd5975362008-06-07 20:50:51 +00001679 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1680 problem and hope the cpu will stop of its own accord. For userspace
1681 emulation this often isn't actually as bad as it sounds. Often
1682 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001683 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001684 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001685
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001686 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001687 tb = env->current_tb;
1688 /* if the cpu is currently executing code, we must unlink it and
1689 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001690 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001691 env->current_tb = NULL;
1692 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001693 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001694 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001695}
1696
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001697#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001698/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001699static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001700{
1701 int old_mask;
1702
1703 old_mask = env->interrupt_request;
1704 env->interrupt_request |= mask;
1705
aliguori8edac962009-04-24 18:03:45 +00001706 /*
1707     * If called from iothread context, wake the target CPU in
1708     * case it is halted.
1709 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001710 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001711 qemu_cpu_kick(env);
1712 return;
1713 }
aliguori8edac962009-04-24 18:03:45 +00001714
pbrook2e70f6e2008-06-29 01:03:05 +00001715 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001716 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001717 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001718 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001719 cpu_abort(env, "Raised interrupt while not in I/O function");
1720 }
pbrook2e70f6e2008-06-29 01:03:05 +00001721 } else {
aurel323098dba2009-03-07 21:28:24 +00001722 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001723 }
1724}
1725
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001726CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1727
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001728#else /* CONFIG_USER_ONLY */
1729
1730void cpu_interrupt(CPUState *env, int mask)
1731{
1732 env->interrupt_request |= mask;
1733 cpu_unlink_tb(env);
1734}
1735#endif /* CONFIG_USER_ONLY */
1736
bellardb54ad042004-05-20 13:42:52 +00001737void cpu_reset_interrupt(CPUState *env, int mask)
1738{
1739 env->interrupt_request &= ~mask;
1740}
1741
aurel323098dba2009-03-07 21:28:24 +00001742void cpu_exit(CPUState *env)
1743{
1744 env->exit_request = 1;
1745 cpu_unlink_tb(env);
1746}
1747
blueswir1c7cd6a32008-10-02 18:27:46 +00001748const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001749 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001750 "show generated host assembly code for each compiled TB" },
1751 { CPU_LOG_TB_IN_ASM, "in_asm",
1752 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001753 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001754 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001755 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001756 "show micro ops "
1757#ifdef TARGET_I386
1758 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001759#endif
blueswir1e01a1152008-03-14 17:37:11 +00001760 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001761 { CPU_LOG_INT, "int",
1762 "show interrupts/exceptions in short format" },
1763 { CPU_LOG_EXEC, "exec",
1764 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001765 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001766 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001767#ifdef TARGET_I386
1768 { CPU_LOG_PCALL, "pcall",
1769 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001770 { CPU_LOG_RESET, "cpu_reset",
1771 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001772#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001773#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001774 { CPU_LOG_IOPORT, "ioport",
1775 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001776#endif
bellardf193c792004-03-21 17:06:25 +00001777 { 0, NULL, NULL },
1778};
1779
1780static int cmp1(const char *s1, int n, const char *s2)
1781{
1782 if (strlen(s2) != n)
1783 return 0;
1784 return memcmp(s1, s2, n) == 0;
1785}
ths3b46e622007-09-17 08:09:54 +00001786
bellardf193c792004-03-21 17:06:25 +00001787/* takes a comma-separated list of log masks. Returns 0 on error. */
1788int cpu_str_to_log_mask(const char *str)
1789{
blueswir1c7cd6a32008-10-02 18:27:46 +00001790 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001791 int mask;
1792 const char *p, *p1;
1793
1794 p = str;
1795 mask = 0;
1796 for(;;) {
1797 p1 = strchr(p, ',');
1798 if (!p1)
1799 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001800 if(cmp1(p,p1-p,"all")) {
1801 for(item = cpu_log_items; item->mask != 0; item++) {
1802 mask |= item->mask;
1803 }
1804 } else {
1805 for(item = cpu_log_items; item->mask != 0; item++) {
1806 if (cmp1(p, p1 - p, item->name))
1807 goto found;
1808 }
1809 return 0;
bellardf193c792004-03-21 17:06:25 +00001810 }
bellardf193c792004-03-21 17:06:25 +00001811 found:
1812 mask |= item->mask;
1813 if (*p1 != ',')
1814 break;
1815 p = p1 + 1;
1816 }
1817 return mask;
1818}
bellardea041c02003-06-25 16:16:50 +00001819
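/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): wiring cpu_str_to_log_mask() and cpu_set_log()
   together, as a -d command line option handler would. */
#if 0
static void example_handle_d_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,exec" */

    if (!mask) {
        fprintf(stderr, "unknown -d log item in '%s'\n", arg);
    } else {
        cpu_set_log(mask);
    }
}
#endif
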
bellard75012672003-06-21 13:11:07 +00001820void cpu_abort(CPUState *env, const char *fmt, ...)
1821{
1822 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001823 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001824
1825 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001826 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001827 fprintf(stderr, "qemu: fatal: ");
1828 vfprintf(stderr, fmt, ap);
1829 fprintf(stderr, "\n");
1830#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001831 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1832#else
1833 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001834#endif
aliguori93fcfe32009-01-15 22:34:14 +00001835 if (qemu_log_enabled()) {
1836 qemu_log("qemu: fatal: ");
1837 qemu_log_vprintf(fmt, ap2);
1838 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001839#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001840 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001841#else
aliguori93fcfe32009-01-15 22:34:14 +00001842 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001843#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001844 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001845 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001846 }
pbrook493ae1f2007-11-23 16:53:59 +00001847 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001848 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001849#if defined(CONFIG_USER_ONLY)
1850 {
1851 struct sigaction act;
1852 sigfillset(&act.sa_mask);
1853 act.sa_handler = SIG_DFL;
1854 sigaction(SIGABRT, &act, NULL);
1855 }
1856#endif
bellard75012672003-06-21 13:11:07 +00001857 abort();
1858}
1859
thsc5be9f02007-02-28 20:20:53 +00001860CPUState *cpu_copy(CPUState *env)
1861{
ths01ba9812007-12-09 02:22:57 +00001862 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001863 CPUState *next_cpu = new_env->next_cpu;
1864 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001865#if defined(TARGET_HAS_ICE)
1866 CPUBreakpoint *bp;
1867 CPUWatchpoint *wp;
1868#endif
1869
thsc5be9f02007-02-28 20:20:53 +00001870 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001871
1872 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001873 new_env->next_cpu = next_cpu;
1874 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001875
1876 /* Clone all break/watchpoints.
1877 Note: Once we support ptrace with hw-debug register access, make sure
1878 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001879 QTAILQ_INIT(&env->breakpoints);
1880 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001881#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001882 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001883 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1884 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001885 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001886 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1887 wp->flags, NULL);
1888 }
1889#endif
1890
thsc5be9f02007-02-28 20:20:53 +00001891 return new_env;
1892}
1893
bellard01243112004-01-04 15:48:17 +00001894#if !defined(CONFIG_USER_ONLY)
1895
edgar_igl5c751e92008-05-06 08:44:21 +00001896static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1897{
1898 unsigned int i;
1899
1900    /* Discard jump cache entries for any tb that might overlap the
1901       flushed page. */
1902 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1903 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001904 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001905
1906 i = tb_jmp_cache_hash_page(addr);
1907 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001908 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001909}
1910
Igor Kovalenko08738982009-07-12 02:15:40 +04001911static CPUTLBEntry s_cputlb_empty_entry = {
1912 .addr_read = -1,
1913 .addr_write = -1,
1914 .addr_code = -1,
1915 .addend = -1,
1916};
1917
Peter Maydell771124e2012-01-17 13:23:13 +00001918/* NOTE:
1919 * If flush_global is true (the usual case), flush all tlb entries.
1920 * If flush_global is false, flush (at least) all tlb entries not
1921 * marked global.
1922 *
1923 * Since QEMU doesn't currently implement a global/not-global flag
1924 * for tlb entries, at the moment tlb_flush() will also flush all
1925 * tlb entries in the flush_global == false case. This is OK because
1926 * CPU architectures generally permit an implementation to drop
1927 * entries from the TLB at any time, so flushing more entries than
1928 * required is only an efficiency issue, not a correctness issue.
1929 */
bellardee8b7022004-02-03 23:35:10 +00001930void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001931{
bellard33417e72003-08-10 21:47:01 +00001932 int i;
bellard01243112004-01-04 15:48:17 +00001933
bellard9fa3e852004-01-04 18:06:42 +00001934#if defined(DEBUG_TLB)
1935 printf("tlb_flush:\n");
1936#endif
bellard01243112004-01-04 15:48:17 +00001937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1940
bellard33417e72003-08-10 21:47:01 +00001941 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001942 int mmu_idx;
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001944 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001945 }
bellard33417e72003-08-10 21:47:01 +00001946 }
bellard9fa3e852004-01-04 18:06:42 +00001947
bellard8a40a182005-11-20 10:35:40 +00001948 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001949
Paul Brookd4c430a2010-03-17 02:14:28 +00001950 env->tlb_flush_addr = -1;
1951 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001952 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001953}
1954
bellard274da6b2004-05-20 21:56:27 +00001955static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001956{
ths5fafdf22007-09-16 21:08:06 +00001957 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001958 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001959 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001960 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001961 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001962 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001963 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001964 }
bellard61382a52003-10-27 21:22:23 +00001965}
1966
bellard2e126692004-04-25 21:28:44 +00001967void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001968{
bellard8a40a182005-11-20 10:35:40 +00001969 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001970 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001971
bellard9fa3e852004-01-04 18:06:42 +00001972#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001973 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001974#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001975 /* Check if we need to flush due to large pages. */
1976 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1977#if defined(DEBUG_TLB)
1978 printf("tlb_flush_page: forced full flush ("
1979 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1980 env->tlb_flush_addr, env->tlb_flush_mask);
1981#endif
1982 tlb_flush(env, 1);
1983 return;
1984 }
bellard01243112004-01-04 15:48:17 +00001985 /* must reset current TB so that interrupts cannot modify the
1986 links while we are modifying them */
1987 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001988
bellard61382a52003-10-27 21:22:23 +00001989 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001990 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001991 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1992 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001993
edgar_igl5c751e92008-05-06 08:44:21 +00001994 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001995}
1996
bellard9fa3e852004-01-04 18:06:42 +00001997/* update the TLBs so that writes to code in the virtual page 'addr'
1998 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001999static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002000{
ths5fafdf22007-09-16 21:08:06 +00002001 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002002 ram_addr + TARGET_PAGE_SIZE,
2003 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002004}
2005
bellard9fa3e852004-01-04 18:06:42 +00002006/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002007   tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002008static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002009 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002010{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002011 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002012}
2013
ths5fafdf22007-09-16 21:08:06 +00002014static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002015 unsigned long start, unsigned long length)
2016{
2017 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002018 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00002019 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002020 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002021 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002022 }
2023 }
2024}
2025
pbrook5579c7f2009-04-11 14:47:08 +00002026/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002027void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002028 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002029{
2030 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002031 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002032 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002033
2034 start &= TARGET_PAGE_MASK;
2035 end = TARGET_PAGE_ALIGN(end);
2036
2037 length = end - start;
2038 if (length == 0)
2039 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002040 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002041
bellard1ccde1c2004-02-06 19:46:14 +00002042 /* we modify the TLB cache so that the dirty bit will be set again
2043 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002044 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002045 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002046 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002047 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002048 != (end - 1) - start) {
2049 abort();
2050 }
2051
bellard6a00d602005-11-21 23:25:50 +00002052 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002053 int mmu_idx;
2054 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2055 for(i = 0; i < CPU_TLB_SIZE; i++)
2056 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2057 start1, length);
2058 }
bellard6a00d602005-11-21 23:25:50 +00002059 }
bellard1ccde1c2004-02-06 19:46:14 +00002060}
2061
aliguori74576192008-10-06 14:02:03 +00002062int cpu_physical_memory_set_dirty_tracking(int enable)
2063{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002064 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002065 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002066 return ret;
aliguori74576192008-10-06 14:02:03 +00002067}
2068
bellard3a7d9292005-08-21 09:26:42 +00002069static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2070{
Anthony Liguoric227f092009-10-01 16:12:16 -05002071 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002072 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002073
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002074 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002075 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2076 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002077 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002078 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002079 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002080 }
2081 }
2082}
2083
2084/* update the TLB according to the current state of the dirty bits */
2085void cpu_tlb_update_dirty(CPUState *env)
2086{
2087 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002088 int mmu_idx;
2089 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2090 for(i = 0; i < CPU_TLB_SIZE; i++)
2091 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2092 }
bellard3a7d9292005-08-21 09:26:42 +00002093}
2094
pbrook0f459d12008-06-09 00:20:13 +00002095static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002096{
pbrook0f459d12008-06-09 00:20:13 +00002097 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2098 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002099}
2100
pbrook0f459d12008-06-09 00:20:13 +00002101/* update the TLB corresponding to virtual page vaddr
2102 so that it is no longer dirty */
2103static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002104{
bellard1ccde1c2004-02-06 19:46:14 +00002105 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002106 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002107
pbrook0f459d12008-06-09 00:20:13 +00002108 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002109 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002110 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2111 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002112}
2113
Paul Brookd4c430a2010-03-17 02:14:28 +00002114/* Our TLB does not support large pages, so remember the area covered by
2115 large pages and trigger a full TLB flush if these are invalidated. */
2116static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2117 target_ulong size)
2118{
2119 target_ulong mask = ~(size - 1);
2120
2121 if (env->tlb_flush_addr == (target_ulong)-1) {
2122 env->tlb_flush_addr = vaddr & mask;
2123 env->tlb_flush_mask = mask;
2124 return;
2125 }
2126 /* Extend the existing region to include the new page.
2127 This is a compromise between unnecessary flushes and the cost
2128 of maintaining a full variable size TLB. */
2129 mask &= env->tlb_flush_mask;
2130 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2131 mask <<= 1;
2132 }
2133 env->tlb_flush_addr &= mask;
2134 env->tlb_flush_mask = mask;
2135}
2136
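/* Worked example (editor's note): with 4 KB target pages, tracking a
   2 MB page at 0x00200000 sets tlb_flush_addr = 0x00200000 and
   tlb_flush_mask = 0xffe00000. Adding a second 2 MB page at 0x00600000
   shifts the mask until both addresses agree, leaving
   tlb_flush_addr = 0x00000000 and tlb_flush_mask = 0xff800000, i.e. a
   single 8 MB region covering both pages at the cost of extra full
   flushes. */
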
Avi Kivity1d393fa2012-01-01 21:15:42 +02002137static bool is_ram_rom(ram_addr_t pd)
2138{
2139 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002140 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002141}
2142
Avi Kivity75c578d2012-01-02 15:40:52 +02002143static bool is_romd(ram_addr_t pd)
2144{
2145 MemoryRegion *mr;
2146
2147 pd &= ~TARGET_PAGE_MASK;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002148 mr = io_mem_region[pd];
Avi Kivity75c578d2012-01-02 15:40:52 +02002149 return mr->rom_device && mr->readable;
2150}
2151
Avi Kivity1d393fa2012-01-01 21:15:42 +02002152static bool is_ram_rom_romd(ram_addr_t pd)
2153{
Avi Kivity75c578d2012-01-02 15:40:52 +02002154 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002155}
2156
Paul Brookd4c430a2010-03-17 02:14:28 +00002157/* Add a new TLB entry. At most one entry for a given virtual address
2158   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2159   supplied size is used only by tlb_flush_page. */
2160void tlb_set_page(CPUState *env, target_ulong vaddr,
2161 target_phys_addr_t paddr, int prot,
2162 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002163{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002164 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002165 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002166 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002167 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002168 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002169 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002170 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002171 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002172 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002173
Paul Brookd4c430a2010-03-17 02:14:28 +00002174 assert(size >= TARGET_PAGE_SIZE);
2175 if (size != TARGET_PAGE_SIZE) {
2176 tlb_add_large_page(env, vaddr, size);
2177 }
bellard92e873b2004-05-21 14:52:29 +00002178 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002179 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002180#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002181 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2182 " prot=%x idx=%d pd=0x%08lx\n",
2183 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002184#endif
2185
pbrook0f459d12008-06-09 00:20:13 +00002186 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002187 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002188 /* IO memory case (romd handled later) */
2189 address |= TLB_MMIO;
2190 }
pbrook5579c7f2009-04-11 14:47:08 +00002191 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002192 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002193 /* Normal RAM. */
2194 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002195 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2196 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002197 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002198 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002199 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002200 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002201 It would be nice to pass an offset from the base address
2202 of that region. This would avoid having to special case RAM,
2203 and avoid full address decoding in every device.
2204 We can't use the high bits of pd for this because
2205 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002206 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002207 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002208 }
pbrook6658ffb2007-03-16 23:58:11 +00002209
pbrook0f459d12008-06-09 00:20:13 +00002210 code_address = address;
2211 /* Make accesses to pages with watchpoints go via the
2212 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002213 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002214 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002215 /* Avoid trapping reads of pages with a write breakpoint. */
2216 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002217 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002218 address |= TLB_MMIO;
2219 break;
2220 }
pbrook6658ffb2007-03-16 23:58:11 +00002221 }
pbrook0f459d12008-06-09 00:20:13 +00002222 }
balrogd79acba2007-06-26 20:01:13 +00002223
pbrook0f459d12008-06-09 00:20:13 +00002224 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2225 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2226 te = &env->tlb_table[mmu_idx][index];
2227 te->addend = addend - vaddr;
2228 if (prot & PAGE_READ) {
2229 te->addr_read = address;
2230 } else {
2231 te->addr_read = -1;
2232 }
edgar_igl5c751e92008-05-06 08:44:21 +00002233
pbrook0f459d12008-06-09 00:20:13 +00002234 if (prot & PAGE_EXEC) {
2235 te->addr_code = code_address;
2236 } else {
2237 te->addr_code = -1;
2238 }
2239 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002240 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002241 /* Write access calls the I/O callback. */
2242 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002243 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002244 !cpu_physical_memory_is_dirty(pd)) {
2245 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002246 } else {
pbrook0f459d12008-06-09 00:20:13 +00002247 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002248 }
pbrook0f459d12008-06-09 00:20:13 +00002249 } else {
2250 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002251 }
bellard9fa3e852004-01-04 18:06:42 +00002252}
2253
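/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): a target MMU fault handler that has resolved
   vaddr -> paddr with read/write permission would install the mapping
   like this. */
#if 0
static void example_install_mapping(CPUState *env, target_ulong vaddr,
                                    target_phys_addr_t paddr, int mmu_idx)
{
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 PAGE_READ | PAGE_WRITE, mmu_idx, TARGET_PAGE_SIZE);
}
#endif
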
bellard01243112004-01-04 15:48:17 +00002254#else
2255
bellardee8b7022004-02-03 23:35:10 +00002256void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002257{
2258}
2259
bellard2e126692004-04-25 21:28:44 +00002260void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002261{
2262}
2263
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002264/*
2265 * Walks guest process memory "regions" one by one
2266 * and calls callback function 'fn' for each region.
2267 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002268
2269struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002270{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002271 walk_memory_regions_fn fn;
2272 void *priv;
2273 unsigned long start;
2274 int prot;
2275};
bellard9fa3e852004-01-04 18:06:42 +00002276
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002277static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002278 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002279{
2280 if (data->start != -1ul) {
2281 int rc = data->fn(data->priv, data->start, end, data->prot);
2282 if (rc != 0) {
2283 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002284 }
bellard33417e72003-08-10 21:47:01 +00002285 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002286
2287 data->start = (new_prot ? end : -1ul);
2288 data->prot = new_prot;
2289
2290 return 0;
2291}
2292
2293static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002294 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002295{
Paul Brookb480d9b2010-03-12 23:23:29 +00002296 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002297 int i, rc;
2298
2299 if (*lp == NULL) {
2300 return walk_memory_regions_end(data, base, 0);
2301 }
2302
2303 if (level == 0) {
2304 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002305 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002306 int prot = pd[i].flags;
2307
2308 pa = base | (i << TARGET_PAGE_BITS);
2309 if (prot != data->prot) {
2310 rc = walk_memory_regions_end(data, pa, prot);
2311 if (rc != 0) {
2312 return rc;
2313 }
2314 }
2315 }
2316 } else {
2317 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002318 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002319 pa = base | ((abi_ulong)i <<
2320 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002321 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2322 if (rc != 0) {
2323 return rc;
2324 }
2325 }
2326 }
2327
2328 return 0;
2329}
2330
2331int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2332{
2333 struct walk_memory_regions_data data;
2334 unsigned long i;
2335
2336 data.fn = fn;
2337 data.priv = priv;
2338 data.start = -1ul;
2339 data.prot = 0;
2340
2341 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002342 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002343 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2344 if (rc != 0) {
2345 return rc;
2346 }
2347 }
2348
2349 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002350}
2351
Paul Brookb480d9b2010-03-12 23:23:29 +00002352static int dump_region(void *priv, abi_ulong start,
2353 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002354{
2355 FILE *f = (FILE *)priv;
2356
Paul Brookb480d9b2010-03-12 23:23:29 +00002357 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2358 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002359 start, end, end - start,
2360 ((prot & PAGE_READ) ? 'r' : '-'),
2361 ((prot & PAGE_WRITE) ? 'w' : '-'),
2362 ((prot & PAGE_EXEC) ? 'x' : '-'));
2363
2364 return (0);
2365}
2366
2367/* dump memory mappings */
2368void page_dump(FILE *f)
2369{
2370 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2371 "start", "end", "size", "prot");
2372 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002373}
2374
pbrook53a59602006-03-25 19:31:22 +00002375int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002376{
bellard9fa3e852004-01-04 18:06:42 +00002377 PageDesc *p;
2378
2379 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002380 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002381 return 0;
2382 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002383}
2384
Richard Henderson376a7902010-03-10 15:57:04 -08002385/* Modify the flags of a page and invalidate the code if necessary.
2386 The flag PAGE_WRITE_ORG is positioned automatically depending
2387 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002388void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002389{
Richard Henderson376a7902010-03-10 15:57:04 -08002390 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002391
Richard Henderson376a7902010-03-10 15:57:04 -08002392 /* This function should never be called with addresses outside the
2393 guest address space. If this assert fires, it probably indicates
2394 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002395#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2396 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002397#endif
2398 assert(start < end);
2399
bellard9fa3e852004-01-04 18:06:42 +00002400 start = start & TARGET_PAGE_MASK;
2401 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002402
2403 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002404 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002405 }
2406
2407 for (addr = start, len = end - start;
2408 len != 0;
2409 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2410 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2411
2412 /* If the write protection bit is set, then we invalidate
2413 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002414 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002415 (flags & PAGE_WRITE) &&
2416 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002417 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002418 }
2419 p->flags = flags;
2420 }
bellard9fa3e852004-01-04 18:06:42 +00002421}
2422
ths3d97b402007-11-02 19:02:07 +00002423int page_check_range(target_ulong start, target_ulong len, int flags)
2424{
2425 PageDesc *p;
2426 target_ulong end;
2427 target_ulong addr;
2428
Richard Henderson376a7902010-03-10 15:57:04 -08002429 /* This function should never be called with addresses outside the
2430 guest address space. If this assert fires, it probably indicates
2431 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002432#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2433 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002434#endif
2435
Richard Henderson3e0650a2010-03-29 10:54:42 -07002436 if (len == 0) {
2437 return 0;
2438 }
Richard Henderson376a7902010-03-10 15:57:04 -08002439 if (start + len - 1 < start) {
2440 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002441 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002442 }
balrog55f280c2008-10-28 10:24:11 +00002443
ths3d97b402007-11-02 19:02:07 +00002444    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2445 start = start & TARGET_PAGE_MASK;
2446
Richard Henderson376a7902010-03-10 15:57:04 -08002447 for (addr = start, len = end - start;
2448 len != 0;
2449 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002450 p = page_find(addr >> TARGET_PAGE_BITS);
2451 if( !p )
2452 return -1;
2453 if( !(p->flags & PAGE_VALID) )
2454 return -1;
2455
bellarddae32702007-11-14 10:51:00 +00002456 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002457 return -1;
bellarddae32702007-11-14 10:51:00 +00002458 if (flags & PAGE_WRITE) {
2459 if (!(p->flags & PAGE_WRITE_ORG))
2460 return -1;
2461 /* unprotect the page if it was put read-only because it
2462 contains translated code */
2463 if (!(p->flags & PAGE_WRITE)) {
2464 if (!page_unprotect(addr, 0, NULL))
2465 return -1;
2466 }
2467 return 0;
2468 }
ths3d97b402007-11-02 19:02:07 +00002469 }
2470 return 0;
2471}
2472
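/* Usage sketch (editor's illustration; the example_* name is
   hypothetical): validating a guest buffer before reading it, as the
   user-mode syscall emulation layer does. */
#if 0
static int example_validate_guest_read(target_ulong guest_addr,
                                       target_ulong size)
{
    if (page_check_range(guest_addr, size, PAGE_READ) != 0) {
        return -1;   /* range unmapped, invalid or not readable */
    }
    return 0;
}
#endif
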
bellard9fa3e852004-01-04 18:06:42 +00002473/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002474 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002475int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002476{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002477 unsigned int prot;
2478 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002479 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002480
pbrookc8a706f2008-06-02 16:16:42 +00002481 /* Technically this isn't safe inside a signal handler. However we
2482 know this only ever happens in a synchronous SEGV handler, so in
2483 practice it seems to be ok. */
2484 mmap_lock();
2485
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002486 p = page_find(address >> TARGET_PAGE_BITS);
2487 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002488 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002489 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002490 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002491
bellard9fa3e852004-01-04 18:06:42 +00002492 /* if the page was really writable, then we change its
2493 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002494 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2495 host_start = address & qemu_host_page_mask;
2496 host_end = host_start + qemu_host_page_size;
2497
2498 prot = 0;
2499 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2500 p = page_find(addr >> TARGET_PAGE_BITS);
2501 p->flags |= PAGE_WRITE;
2502 prot |= p->flags;
2503
bellard9fa3e852004-01-04 18:06:42 +00002504 /* and since the content will be modified, we must invalidate
2505 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002506 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002507#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002508 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002509#endif
bellard9fa3e852004-01-04 18:06:42 +00002510 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002511 mprotect((void *)g2h(host_start), qemu_host_page_size,
2512 prot & PAGE_BITS);
2513
2514 mmap_unlock();
2515 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002516 }
pbrookc8a706f2008-06-02 16:16:42 +00002517 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002518 return 0;
2519}
2520
bellard6a00d602005-11-21 23:25:50 +00002521static inline void tlb_set_dirty(CPUState *env,
2522 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002523{
2524}
bellard9fa3e852004-01-04 18:06:42 +00002525#endif /* defined(CONFIG_USER_ONLY) */
2526
pbrooke2eef172008-06-08 01:09:01 +00002527#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002528
Paul Brookc04b2b72010-03-01 03:31:14 +00002529#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2530typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002531 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002532 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002533 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002534} subpage_t;
2535
Anthony Liguoric227f092009-10-01 16:12:16 -05002536static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002537 uint16_t section);
2538static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section,
2539 uint16_t orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00002540#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2541 need_subpage) \
2542 do { \
2543 if (addr > start_addr) \
2544 start_addr2 = 0; \
2545 else { \
2546 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2547 if (start_addr2 > 0) \
2548 need_subpage = 1; \
2549 } \
2550 \
blueswir149e9fba2007-05-30 17:25:06 +00002551 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002552 end_addr2 = TARGET_PAGE_SIZE - 1; \
2553 else { \
2554 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2555 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2556 need_subpage = 1; \
2557 } \
2558 } while (0)
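/* Worked example (illustrative, assuming 4 KiB target pages): registering
   a region at start_addr = 0x1200 with orig_size = 0x400 first visits
   addr = 0x1200. Since addr == start_addr, start_addr2 becomes
   0x1200 & ~TARGET_PAGE_MASK = 0x200, and because the region ends at
   0x15ff, end_addr2 becomes 0x5ff. Both values fall inside the page, so
   need_subpage is set and only byte offsets [0x200, 0x5ff] of that page
   are routed to the new region; the rest keeps its previous mapping. */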
2559
Avi Kivity5312bd82012-02-12 18:32:55 +02002560static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002561{
Avi Kivity5312bd82012-02-12 18:32:55 +02002562 MemoryRegionSection *section = &phys_sections[section_index];
2563 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002564
2565 if (mr->subpage) {
2566 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2567 memory_region_destroy(&subpage->iomem);
2568 g_free(subpage);
2569 }
2570}
2571
Avi Kivity4346ae32012-02-10 17:00:01 +02002572static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002573{
2574 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002575 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002576
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002577 if (lp->u.node == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002578 return;
2579 }
2580
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002581 p = phys_map_nodes[lp->u.node];
Avi Kivity4346ae32012-02-10 17:00:01 +02002582 for (i = 0; i < L2_SIZE; ++i) {
2583 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002584 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002585 } else {
2586 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002587 }
Avi Kivity54688b12012-02-09 17:34:32 +02002588 }
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002589 lp->u.node = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002590}
2591
2592static void destroy_all_mappings(void)
2593{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002594 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002595 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002596}
2597
Avi Kivity5312bd82012-02-12 18:32:55 +02002598static uint16_t phys_section_add(MemoryRegionSection *section)
2599{
2600 if (phys_sections_nb == phys_sections_nb_alloc) {
2601 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2602 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2603 phys_sections_nb_alloc);
2604 }
2605 phys_sections[phys_sections_nb] = *section;
2606 return phys_sections_nb++;
2607}
2608
2609static void phys_sections_clear(void)
2610{
2611 phys_sections_nb = 0;
2612}
2613
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002614/* register physical memory.
2615 For RAM, the section size must be a multiple of the target page size.
2616 Sections that are not plain RAM are dispatched through the I/O
2617 callbacks of their MemoryRegion. The address used when calling the
2618 I/O function is the offset from the start of the section's region.
2619 Both the section's start address and its offset within the region
2620 are rounded down to a page boundary before calculating this offset.
2621 This should not be a problem unless their low bits differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002622void cpu_register_physical_memory_log(MemoryRegionSection *section,
Avi Kivityd7ec83e2012-02-08 17:07:26 +02002623 bool readonly)
bellard33417e72003-08-10 21:47:01 +00002624{
Avi Kivitydd811242012-01-02 12:17:03 +02002625 target_phys_addr_t start_addr = section->offset_within_address_space;
2626 ram_addr_t size = section->size;
Anthony Liguoric227f092009-10-01 16:12:16 -05002627 target_phys_addr_t addr, end_addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002628 uint16_t *p;
bellard9d420372006-06-25 22:25:22 +00002629 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002630 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002631 subpage_t *subpage;
Avi Kivity5312bd82012-02-12 18:32:55 +02002632 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002633
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002634 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002635
bellard5fd386f2004-05-23 21:11:22 +00002636 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002637 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002638
2639 addr = start_addr;
2640 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002641 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity5312bd82012-02-12 18:32:55 +02002642 if (p && *p != phys_section_unassigned) {
2643 uint16_t orig_memory = *p;
Anthony Liguoric227f092009-10-01 16:12:16 -05002644 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002645 int need_subpage = 0;
Avi Kivity5312bd82012-02-12 18:32:55 +02002646 MemoryRegion *mr = phys_sections[orig_memory].mr;
blueswir1db7b5422007-05-26 17:36:03 +00002647
2648 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2649 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002650 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002651 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002652 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity5312bd82012-02-12 18:32:55 +02002653 p, orig_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002654 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002655 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002656 }
Avi Kivity5312bd82012-02-12 18:32:55 +02002657 subpage_register(subpage, start_addr2, end_addr2,
2658 section_index);
blueswir1db7b5422007-05-26 17:36:03 +00002659 } else {
Avi Kivity5312bd82012-02-12 18:32:55 +02002660 *p = section_index;
blueswir1db7b5422007-05-26 17:36:03 +00002661 }
2662 } else {
Avi Kivity5312bd82012-02-12 18:32:55 +02002663 MemoryRegion *mr = section->mr;
blueswir1db7b5422007-05-26 17:36:03 +00002664 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
Avi Kivity5312bd82012-02-12 18:32:55 +02002665 *p = section_index;
2666 if (!(memory_region_is_ram(mr) || mr->rom_device)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002667 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002668 int need_subpage = 0;
2669
2670 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2671 end_addr2, need_subpage);
2672
Richard Hendersonf6405242010-04-22 16:47:31 -07002673 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002674 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity5312bd82012-02-12 18:32:55 +02002675 p, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00002676 subpage_register(subpage, start_addr2, end_addr2,
Avi Kivity5312bd82012-02-12 18:32:55 +02002677 section_index);
blueswir1db7b5422007-05-26 17:36:03 +00002678 }
2679 }
2680 }
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002681 addr += TARGET_PAGE_SIZE;
2682 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002683
bellard9d420372006-06-25 22:25:22 +00002684 /* since each CPU stores ram addresses in its TLB cache, we must
2685 reset the modified entries */
2686 /* XXX: slow ! */
2687 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2688 tlb_flush(env, 1);
2689 }
bellard33417e72003-08-10 21:47:01 +00002690}
2691
Anthony Liguoric227f092009-10-01 16:12:16 -05002692void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002693{
2694 if (kvm_enabled())
2695 kvm_coalesce_mmio_region(addr, size);
2696}
2697
Anthony Liguoric227f092009-10-01 16:12:16 -05002698void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002699{
2700 if (kvm_enabled())
2701 kvm_uncoalesce_mmio_region(addr, size);
2702}
2703
Sheng Yang62a27442010-01-26 19:21:16 +08002704void qemu_flush_coalesced_mmio_buffer(void)
2705{
2706 if (kvm_enabled())
2707 kvm_flush_coalesced_mmio_buffer();
2708}
2709
Marcelo Tosattic9027602010-03-01 20:25:08 -03002710#if defined(__linux__) && !defined(TARGET_S390X)
2711
2712#include <sys/vfs.h>
2713
2714#define HUGETLBFS_MAGIC 0x958458f6
2715
2716static long gethugepagesize(const char *path)
2717{
2718 struct statfs fs;
2719 int ret;
2720
2721 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002722 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002723 } while (ret != 0 && errno == EINTR);
2724
2725 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002726 perror(path);
2727 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002728 }
2729
2730 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002731 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002732
2733 return fs.f_bsize;
2734}
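/* Illustrative sketch: on a typical x86 Linux host with hugetlbfs mounted
   at /dev/hugepages (the mount point is an assumption), the call

       long sz = gethugepagesize("/dev/hugepages");

   returns the filesystem block size, which for hugetlbfs equals the huge
   page size (commonly 2 MiB). A return of 0 means statfs() failed; a
   non-hugetlbfs path only triggers the warning above and returns that
   filesystem's block size. */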
2735
Alex Williamson04b16652010-07-02 11:13:17 -06002736static void *file_ram_alloc(RAMBlock *block,
2737 ram_addr_t memory,
2738 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002739{
2740 char *filename;
2741 void *area;
2742 int fd;
2743#ifdef MAP_POPULATE
2744 int flags;
2745#endif
2746 unsigned long hpagesize;
2747
2748 hpagesize = gethugepagesize(path);
2749 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002750 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002751 }
2752
2753 if (memory < hpagesize) {
2754 return NULL;
2755 }
2756
2757 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2758 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2759 return NULL;
2760 }
2761
2762 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002763 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002764 }
2765
2766 fd = mkstemp(filename);
2767 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002768 perror("unable to create backing store for hugepages");
2769 free(filename);
2770 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002771 }
2772 unlink(filename);
2773 free(filename);
2774
2775 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2776
2777 /*
2778 * ftruncate is not supported by hugetlbfs in older
2779 * hosts, so don't bother bailing out on errors.
2780 * If anything goes wrong with it under other filesystems,
2781 * mmap will fail.
2782 */
2783 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002784 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002785
2786#ifdef MAP_POPULATE
2787 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2788 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2789 * to sidestep this quirk.
2790 */
2791 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2792 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2793#else
2794 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2795#endif
2796 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002797 perror("file_ram_alloc: can't mmap RAM pages");
2798 close(fd);
2799 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002800 }
Alex Williamson04b16652010-07-02 11:13:17 -06002801 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002802 return area;
2803}
2804#endif
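/* Illustrative sketch: with "-mem-path /dev/hugepages" on the command
   line (the mount point is an assumption), qemu_ram_alloc_from_ptr()
   below ends up in file_ram_alloc(), and the requested size is rounded
   up to a whole number of huge pages:

       memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

   e.g. a 100 MiB request with 2 MiB pages stays at 100 MiB (exactly 50
   pages), while 100 MiB + 1 byte would round up to 102 MiB. The backing
   file is unlinked right after mkstemp(), so the storage disappears once
   the descriptor is closed. */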
2805
Alex Williamsond17b5282010-06-25 11:08:38 -06002806static ram_addr_t find_ram_offset(ram_addr_t size)
2807{
Alex Williamson04b16652010-07-02 11:13:17 -06002808 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002809 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002810
2811 if (QLIST_EMPTY(&ram_list.blocks))
2812 return 0;
2813
2814 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002815 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002816
2817 end = block->offset + block->length;
2818
2819 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2820 if (next_block->offset >= end) {
2821 next = MIN(next, next_block->offset);
2822 }
2823 }
2824 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002825 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002826 mingap = next - end;
2827 }
2828 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002829
2830 if (offset == RAM_ADDR_MAX) {
2831 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2832 (uint64_t)size);
2833 abort();
2834 }
2835
Alex Williamson04b16652010-07-02 11:13:17 -06002836 return offset;
2837}
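/* Worked example (illustrative): with existing blocks [0, 0x1000) and
   [0x3000, 0x4000), a request for size 0x1000 considers the gap after
   each block: after the first block the next block starts at 0x3000
   (gap 0x2000); after the second there is no follower, so the gap runs
   to RAM_ADDR_MAX. Best fit keeps the smallest gap that still holds the
   request, so the new block lands at offset 0x1000. */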
2838
2839static ram_addr_t last_ram_offset(void)
2840{
Alex Williamsond17b5282010-06-25 11:08:38 -06002841 RAMBlock *block;
2842 ram_addr_t last = 0;
2843
2844 QLIST_FOREACH(block, &ram_list.blocks, next)
2845 last = MAX(last, block->offset + block->length);
2846
2847 return last;
2848}
2849
Avi Kivityc5705a72011-12-20 15:59:12 +02002850void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002851{
2852 RAMBlock *new_block, *block;
2853
Avi Kivityc5705a72011-12-20 15:59:12 +02002854 new_block = NULL;
2855 QLIST_FOREACH(block, &ram_list.blocks, next) {
2856 if (block->offset == addr) {
2857 new_block = block;
2858 break;
2859 }
2860 }
2861 assert(new_block);
2862 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002863
2864 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2865 char *id = dev->parent_bus->info->get_dev_path(dev);
2866 if (id) {
2867 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002868 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002869 }
2870 }
2871 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2872
2873 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002874 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002875 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2876 new_block->idstr);
2877 abort();
2878 }
2879 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002880}
2881
2882ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2883 MemoryRegion *mr)
2884{
2885 RAMBlock *new_block;
2886
2887 size = TARGET_PAGE_ALIGN(size);
2888 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002889
Avi Kivity7c637362011-12-21 13:09:49 +02002890 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002891 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002892 if (host) {
2893 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002894 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002895 } else {
2896 if (mem_path) {
2897#if defined (__linux__) && !defined(TARGET_S390X)
2898 new_block->host = file_ram_alloc(new_block, size, mem_path);
2899 if (!new_block->host) {
2900 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002901 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002902 }
2903#else
2904 fprintf(stderr, "-mem-path option unsupported\n");
2905 exit(1);
2906#endif
2907 } else {
2908#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002909 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2910 a system-defined value, which is at least 256GB. Larger systems
2911 have larger values. We put the guest between the end of data
2912 segment (system break) and this value. We use 32GB as a base to
2913 have enough room for the system break to grow. */
2914 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002915 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002916 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002917 if (new_block->host == MAP_FAILED) {
2918 fprintf(stderr, "Allocating RAM failed\n");
2919 abort();
2920 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002921#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002922 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002923 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002924 } else {
2925 new_block->host = qemu_vmalloc(size);
2926 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002927#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002928 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002929 }
2930 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002931 new_block->length = size;
2932
2933 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2934
Anthony Liguori7267c092011-08-20 22:09:37 -05002935 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002936 last_ram_offset() >> TARGET_PAGE_BITS);
2937 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2938 0xff, size >> TARGET_PAGE_BITS);
2939
2940 if (kvm_enabled())
2941 kvm_setup_guest_memory(new_block->host, size);
2942
2943 return new_block->offset;
2944}
2945
Avi Kivityc5705a72011-12-20 15:59:12 +02002946ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002947{
Avi Kivityc5705a72011-12-20 15:59:12 +02002948 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002949}
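/* Illustrative sketch (names and sizes are assumptions): a board model
   pairs a MemoryRegion it has already initialized with a block from the
   allocator above, e.g.

       ram_addr_t offset = qemu_ram_alloc(64 * 1024 * 1024, mr);
       uint8_t *host = qemu_get_ram_ptr(offset);

   In this tree, memory_region_init_ram() is the usual entry point and
   performs this pairing internally; direct callers are rare. */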
bellarde9a1ab12007-02-08 23:08:38 +00002950
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002951void qemu_ram_free_from_ptr(ram_addr_t addr)
2952{
2953 RAMBlock *block;
2954
2955 QLIST_FOREACH(block, &ram_list.blocks, next) {
2956 if (addr == block->offset) {
2957 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002958 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002959 return;
2960 }
2961 }
2962}
2963
Anthony Liguoric227f092009-10-01 16:12:16 -05002964void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002965{
Alex Williamson04b16652010-07-02 11:13:17 -06002966 RAMBlock *block;
2967
2968 QLIST_FOREACH(block, &ram_list.blocks, next) {
2969 if (addr == block->offset) {
2970 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002971 if (block->flags & RAM_PREALLOC_MASK) {
2972 ;
2973 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002974#if defined (__linux__) && !defined(TARGET_S390X)
2975 if (block->fd) {
2976 munmap(block->host, block->length);
2977 close(block->fd);
2978 } else {
2979 qemu_vfree(block->host);
2980 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002981#else
2982 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002983#endif
2984 } else {
2985#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2986 munmap(block->host, block->length);
2987#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002988 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002989 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002990 } else {
2991 qemu_vfree(block->host);
2992 }
Alex Williamson04b16652010-07-02 11:13:17 -06002993#endif
2994 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002995 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002996 return;
2997 }
2998 }
2999
bellarde9a1ab12007-02-08 23:08:38 +00003000}
3001
Huang Yingcd19cfa2011-03-02 08:56:19 +01003002#ifndef _WIN32
3003void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3004{
3005 RAMBlock *block;
3006 ram_addr_t offset;
3007 int flags;
3008 void *area, *vaddr;
3009
3010 QLIST_FOREACH(block, &ram_list.blocks, next) {
3011 offset = addr - block->offset;
3012 if (offset < block->length) {
3013 vaddr = block->host + offset;
3014 if (block->flags & RAM_PREALLOC_MASK) {
3015 ;
3016 } else {
3017 flags = MAP_FIXED;
3018 munmap(vaddr, length);
3019 if (mem_path) {
3020#if defined(__linux__) && !defined(TARGET_S390X)
3021 if (block->fd) {
3022#ifdef MAP_POPULATE
3023 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3024 MAP_PRIVATE;
3025#else
3026 flags |= MAP_PRIVATE;
3027#endif
3028 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3029 flags, block->fd, offset);
3030 } else {
3031 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3032 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3033 flags, -1, 0);
3034 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003035#else
3036 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003037#endif
3038 } else {
3039#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3040 flags |= MAP_SHARED | MAP_ANONYMOUS;
3041 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3042 flags, -1, 0);
3043#else
3044 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3045 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3046 flags, -1, 0);
3047#endif
3048 }
3049 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003050 fprintf(stderr, "Could not remap addr: "
3051 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003052 length, addr);
3053 exit(1);
3054 }
3055 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3056 }
3057 return;
3058 }
3059 }
3060}
3061#endif /* !_WIN32 */
3062
pbrookdc828ca2009-04-09 22:21:07 +00003063/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003064 With the exception of the softmmu code in this file, this should
3065 only be used for local memory (e.g. video ram) that the device owns,
3066 and knows it isn't going to access beyond the end of the block.
3067
3068 It should not be used for general purpose DMA.
3069 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3070 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003071void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003072{
pbrook94a6b542009-04-11 17:15:54 +00003073 RAMBlock *block;
3074
Alex Williamsonf471a172010-06-11 11:11:42 -06003075 QLIST_FOREACH(block, &ram_list.blocks, next) {
3076 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003077 /* Move this entry to the start of the list. */
3078 if (block != QLIST_FIRST(&ram_list.blocks)) {
3079 QLIST_REMOVE(block, next);
3080 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3081 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003082 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003083 /* We need to check if the requested address is in RAM
3084 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003085 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003086 */
3087 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003088 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003089 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003090 block->host =
3091 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003092 }
3093 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003094 return block->host + (addr - block->offset);
3095 }
pbrook94a6b542009-04-11 17:15:54 +00003096 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003097
3098 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3099 abort();
3100
3101 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003102}
3103
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003104/* Return a host pointer to ram allocated with qemu_ram_alloc.
3105 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3106 */
3107void *qemu_safe_ram_ptr(ram_addr_t addr)
3108{
3109 RAMBlock *block;
3110
3111 QLIST_FOREACH(block, &ram_list.blocks, next) {
3112 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003113 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003114 /* We need to check if the requested address is in RAM
3115 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003116 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003117 */
3118 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003119 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003120 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003121 block->host =
3122 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003123 }
3124 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003125 return block->host + (addr - block->offset);
3126 }
3127 }
3128
3129 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3130 abort();
3131
3132 return NULL;
3133}
3134
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003135/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3136 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003137void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003138{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003139 if (*size == 0) {
3140 return NULL;
3141 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003142 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003143 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003144 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003145 RAMBlock *block;
3146
3147 QLIST_FOREACH(block, &ram_list.blocks, next) {
3148 if (addr - block->offset < block->length) {
3149 if (addr - block->offset + *size > block->length)
3150 *size = block->length - addr + block->offset;
3151 return block->host + (addr - block->offset);
3152 }
3153 }
3154
3155 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3156 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003157 }
3158}
3159
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003160void qemu_put_ram_ptr(void *addr)
3161{
3162 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003163}
3164
Marcelo Tosattie8902612010-10-11 15:31:19 -03003165int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003166{
pbrook94a6b542009-04-11 17:15:54 +00003167 RAMBlock *block;
3168 uint8_t *host = ptr;
3169
Jan Kiszka868bb332011-06-21 22:59:09 +02003170 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003171 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003172 return 0;
3173 }
3174
Alex Williamsonf471a172010-06-11 11:11:42 -06003175 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003176 /* This can happen when the block is not mapped. */
3177 if (block->host == NULL) {
3178 continue;
3179 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003180 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003181 *ram_addr = block->offset + (host - block->host);
3182 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003183 }
pbrook94a6b542009-04-11 17:15:54 +00003184 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003185
Marcelo Tosattie8902612010-10-11 15:31:19 -03003186 return -1;
3187}
Alex Williamsonf471a172010-06-11 11:11:42 -06003188
Marcelo Tosattie8902612010-10-11 15:31:19 -03003189/* Some of the softmmu routines need to translate from a host pointer
3190 (typically a TLB entry) back to a ram offset. */
3191ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3192{
3193 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003194
Marcelo Tosattie8902612010-10-11 15:31:19 -03003195 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3196 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3197 abort();
3198 }
3199 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003200}
3201
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003202static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3203 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003204{
pbrook67d3b952006-12-18 05:03:52 +00003205#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003206 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003207#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003208#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003209 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003210#endif
3211 return 0;
3212}
3213
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003214static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3215 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003216{
3217#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003218 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003219#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003220#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003221 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003222#endif
3223}
3224
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003225static const MemoryRegionOps unassigned_mem_ops = {
3226 .read = unassigned_mem_read,
3227 .write = unassigned_mem_write,
3228 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003229};
3230
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003231static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3232 unsigned size)
3233{
3234 abort();
3235}
3236
3237static void error_mem_write(void *opaque, target_phys_addr_t addr,
3238 uint64_t value, unsigned size)
3239{
3240 abort();
3241}
3242
3243static const MemoryRegionOps error_mem_ops = {
3244 .read = error_mem_read,
3245 .write = error_mem_write,
3246 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003247};
3248
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003249static const MemoryRegionOps rom_mem_ops = {
3250 .read = error_mem_read,
3251 .write = unassigned_mem_write,
3252 .endianness = DEVICE_NATIVE_ENDIAN,
3253};
3254
3255static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3256 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003257{
bellard3a7d9292005-08-21 09:26:42 +00003258 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003259 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003260 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3261#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003262 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003263 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003264#endif
3265 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003266 switch (size) {
3267 case 1:
3268 stb_p(qemu_get_ram_ptr(ram_addr), val);
3269 break;
3270 case 2:
3271 stw_p(qemu_get_ram_ptr(ram_addr), val);
3272 break;
3273 case 4:
3274 stl_p(qemu_get_ram_ptr(ram_addr), val);
3275 break;
3276 default:
3277 abort();
3278 }
bellardf23db162005-08-21 19:12:28 +00003279 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003280 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003281 /* we remove the notdirty callback only if the code has been
3282 flushed */
3283 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003284 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003285}
3286
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003287static const MemoryRegionOps notdirty_mem_ops = {
3288 .read = error_mem_read,
3289 .write = notdirty_mem_write,
3290 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003291};
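/* How the notdirty path above works: pages that contain translated code
   are entered into the TLB with this handler instead of plain RAM, so
   every store first invalidates any overlapping TBs, then performs the
   write, then sets all dirty bits except CODE_DIRTY_FLAG. If the TB
   invalidation flushed the last translation on the page (dirty_flags
   reaches 0xff), tlb_set_dirty() downgrades the mapping back to an
   ordinary fast RAM write for subsequent stores. */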
3292
pbrook0f459d12008-06-09 00:20:13 +00003293/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003294static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003295{
3296 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003297 target_ulong pc, cs_base;
3298 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003299 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003300 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003301 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003302
aliguori06d55cc2008-11-18 20:24:06 +00003303 if (env->watchpoint_hit) {
3304 /* We re-entered the check after replacing the TB. Now raise
3305 * the debug interrupt so that is will trigger after the
3306 * current instruction. */
3307 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3308 return;
3309 }
pbrook2e70f6e2008-06-29 01:03:05 +00003310 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003311 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003312 if ((vaddr == (wp->vaddr & len_mask) ||
3313 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003314 wp->flags |= BP_WATCHPOINT_HIT;
3315 if (!env->watchpoint_hit) {
3316 env->watchpoint_hit = wp;
3317 tb = tb_find_pc(env->mem_io_pc);
3318 if (!tb) {
3319 cpu_abort(env, "check_watchpoint: could not find TB for "
3320 "pc=%p", (void *)env->mem_io_pc);
3321 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003322 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003323 tb_phys_invalidate(tb, -1);
3324 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3325 env->exception_index = EXCP_DEBUG;
3326 } else {
3327 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3328 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3329 }
3330 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003331 }
aliguori6e140f22008-11-18 20:37:55 +00003332 } else {
3333 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003334 }
3335 }
3336}
3337
pbrook6658ffb2007-03-16 23:58:11 +00003338/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3339 so these check for a hit then pass through to the normal out-of-line
3340 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003341static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3342 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003343{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003344 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3345 switch (size) {
3346 case 1: return ldub_phys(addr);
3347 case 2: return lduw_phys(addr);
3348 case 4: return ldl_phys(addr);
3349 default: abort();
3350 }
pbrook6658ffb2007-03-16 23:58:11 +00003351}
3352
Avi Kivity1ec9b902012-01-02 12:47:48 +02003353static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3354 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003355{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003356 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3357 switch (size) {
3358 case 1: stb_phys(addr, val); break;
3359 case 2: stw_phys(addr, val); break;
3360 case 4: stl_phys(addr, val); break;
3361 default: abort();
3362 }
pbrook6658ffb2007-03-16 23:58:11 +00003363}
3364
Avi Kivity1ec9b902012-01-02 12:47:48 +02003365static const MemoryRegionOps watch_mem_ops = {
3366 .read = watch_mem_read,
3367 .write = watch_mem_write,
3368 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003369};
pbrook6658ffb2007-03-16 23:58:11 +00003370
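/* Sketch of how the watchpoint path is reached (the CPU state and the
   address are assumptions of the example): after

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp);

   the TLB entry covering addr is redirected at io_mem_watch, so every
   load or store to that page funnels through watch_mem_read/write, which
   call check_watchpoint() before forwarding the access to the normal
   physical-memory helpers. */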
Avi Kivity70c68e42012-01-02 12:32:48 +02003371static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3372 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003373{
Avi Kivity70c68e42012-01-02 12:32:48 +02003374 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003375 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003376 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003377#if defined(DEBUG_SUBPAGE)
3378 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3379 mmio, len, addr, idx);
3380#endif
blueswir1db7b5422007-05-26 17:36:03 +00003381
Avi Kivity5312bd82012-02-12 18:32:55 +02003382 section = &phys_sections[mmio->sub_section[idx]];
3383 addr += mmio->base;
3384 addr -= section->offset_within_address_space;
3385 addr += section->offset_within_region;
3386 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003387}
3388
Avi Kivity70c68e42012-01-02 12:32:48 +02003389static void subpage_write(void *opaque, target_phys_addr_t addr,
3390 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003391{
Avi Kivity70c68e42012-01-02 12:32:48 +02003392 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003393 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003394 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003395#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003396 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3397 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003398 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003399#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003400
Avi Kivity5312bd82012-02-12 18:32:55 +02003401 section = &phys_sections[mmio->sub_section[idx]];
3402 addr += mmio->base;
3403 addr -= section->offset_within_address_space;
3404 addr += section->offset_within_region;
3405 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003406}
3407
Avi Kivity70c68e42012-01-02 12:32:48 +02003408static const MemoryRegionOps subpage_ops = {
3409 .read = subpage_read,
3410 .write = subpage_write,
3411 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003412};
3413
Avi Kivityde712f92012-01-02 12:41:07 +02003414static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3415 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003416{
3417 ram_addr_t raddr = addr;
3418 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003419 switch (size) {
3420 case 1: return ldub_p(ptr);
3421 case 2: return lduw_p(ptr);
3422 case 4: return ldl_p(ptr);
3423 default: abort();
3424 }
Andreas Färber56384e82011-11-30 16:26:21 +01003425}
3426
Avi Kivityde712f92012-01-02 12:41:07 +02003427static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3428 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003429{
3430 ram_addr_t raddr = addr;
3431 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003432 switch (size) {
3433 case 1: return stb_p(ptr, value);
3434 case 2: return stw_p(ptr, value);
3435 case 4: return stl_p(ptr, value);
3436 default: abort();
3437 }
Andreas Färber56384e82011-11-30 16:26:21 +01003438}
3439
Avi Kivityde712f92012-01-02 12:41:07 +02003440static const MemoryRegionOps subpage_ram_ops = {
3441 .read = subpage_ram_read,
3442 .write = subpage_ram_write,
3443 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003444};
3445
Anthony Liguoric227f092009-10-01 16:12:16 -05003446static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003447 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003448{
3449 int idx, eidx;
3450
3451 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3452 return -1;
3453 idx = SUBPAGE_IDX(start);
3454 eidx = SUBPAGE_IDX(end);
3455#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003456 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003457 mmio, start, end, idx, eidx, section);
3458#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003459 if (memory_region_is_ram(phys_sections[section].mr)) {
3460 MemoryRegionSection new_section = phys_sections[section];
3461 new_section.mr = &io_mem_subpage_ram;
3462 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003463 }
blueswir1db7b5422007-05-26 17:36:03 +00003464 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003465 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003466 }
3467
3468 return 0;
3469}
3470
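/* A note on the layout above: sub_section holds one uint16_t section
   index per byte of the page (SUBPAGE_IDX(addr) masks with
   ~TARGET_PAGE_MASK), so registering [0x200, 0x5ff] fills 0x400 entries.
   That costs 2 * TARGET_PAGE_SIZE bytes per subpage (8 KiB with 4 KiB
   pages), trading memory for a branch-free lookup on every access. */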
Avi Kivity5312bd82012-02-12 18:32:55 +02003471static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
3472 uint16_t orig_section)
blueswir1db7b5422007-05-26 17:36:03 +00003473{
Anthony Liguoric227f092009-10-01 16:12:16 -05003474 subpage_t *mmio;
Avi Kivity5312bd82012-02-12 18:32:55 +02003475 MemoryRegionSection section = {
3476 .offset_within_address_space = base,
3477 .size = TARGET_PAGE_SIZE,
3478 };
blueswir1db7b5422007-05-26 17:36:03 +00003479
Anthony Liguori7267c092011-08-20 22:09:37 -05003480 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003481
3482 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003483 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3484 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003485 mmio->iomem.subpage = true;
Avi Kivity5312bd82012-02-12 18:32:55 +02003486 section.mr = &mmio->iomem;
blueswir1db7b5422007-05-26 17:36:03 +00003487#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003488 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3489 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003490#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003491 *section_ind = phys_section_add(&section);
3492 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00003493
3494 return mmio;
3495}
3496
aliguori88715652009-02-11 15:20:58 +00003497static int get_free_io_mem_idx(void)
3498{
3499 int i;
3500
3501 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3502 if (!io_mem_used[i]) {
3503 io_mem_used[i] = 1;
3504 return i;
3505 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003506 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003507 return -1;
3508}
3509
bellard33417e72003-08-10 21:47:01 +00003510/* Register a MemoryRegion in the I/O dispatch table. If io_index is
3511 non zero, the corresponding io slot is modified. If it is zero, a new
3512 slot is allocated. The return value can be used with
3513 cpu_register_physical_memory(). -1 is returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003517static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003518{
bellard33417e72003-08-10 21:47:01 +00003519 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003520 io_index = get_free_io_mem_idx();
3521 if (io_index == -1)
3522 return io_index;
bellard33417e72003-08-10 21:47:01 +00003523 } else {
3524 if (io_index >= IO_MEM_NB_ENTRIES)
3525 return -1;
3526 }
bellardb5ff1b32005-11-26 10:38:39 +00003527
Avi Kivitya621f382012-01-02 13:12:08 +02003528 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003529
Avi Kivity11c7ef02012-01-02 17:21:07 +02003530 return io_index;
bellard33417e72003-08-10 21:47:01 +00003531}
bellard61382a52003-10-27 21:22:23 +00003532
Avi Kivitya621f382012-01-02 13:12:08 +02003533int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003534{
Avi Kivitya621f382012-01-02 13:12:08 +02003535 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003536}
3537
Avi Kivity11c7ef02012-01-02 17:21:07 +02003538void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003539{
Avi Kivitya621f382012-01-02 13:12:08 +02003540 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003541 io_mem_used[io_index] = 0;
3542}
3543
Avi Kivity5312bd82012-02-12 18:32:55 +02003544static uint16_t dummy_section(MemoryRegion *mr)
3545{
3546 MemoryRegionSection section = {
3547 .mr = mr,
3548 .offset_within_address_space = 0,
3549 .offset_within_region = 0,
3550 .size = UINT64_MAX,
3551 };
3552
3553 return phys_section_add(&section);
3554}
3555
Avi Kivitye9179ce2009-06-14 11:38:52 +03003556static void io_mem_init(void)
3557{
3558 int i;
3559
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003560 /* Must be first: */
3561 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3562 assert(io_mem_ram.ram_addr == 0);
3563 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3564 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3565 "unassigned", UINT64_MAX);
3566 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3567 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003568 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3569 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003570 for (i = 0; i < 5; i++)
3571 io_mem_used[i] = 1;
3572
Avi Kivity1ec9b902012-01-02 12:47:48 +02003573 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3574 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003575}
3576
Avi Kivity50c1e142012-02-08 21:36:02 +02003577static void core_begin(MemoryListener *listener)
3578{
Avi Kivity54688b12012-02-09 17:34:32 +02003579 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003580 phys_sections_clear();
Avi Kivityd6f2ea22012-02-12 20:12:49 +02003581 phys_map.u.node = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003582 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003583}
3584
3585static void core_commit(MemoryListener *listener)
3586{
3587}
3588
Avi Kivity93632742012-02-08 16:54:16 +02003589static void core_region_add(MemoryListener *listener,
3590 MemoryRegionSection *section)
3591{
Avi Kivity4855d412012-02-08 21:16:05 +02003592 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003593}
3594
3595static void core_region_del(MemoryListener *listener,
3596 MemoryRegionSection *section)
3597{
Avi Kivity93632742012-02-08 16:54:16 +02003598}
3599
Avi Kivity50c1e142012-02-08 21:36:02 +02003600static void core_region_nop(MemoryListener *listener,
3601 MemoryRegionSection *section)
3602{
Avi Kivity54688b12012-02-09 17:34:32 +02003603 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003604}
3605
Avi Kivity93632742012-02-08 16:54:16 +02003606static void core_log_start(MemoryListener *listener,
3607 MemoryRegionSection *section)
3608{
3609}
3610
3611static void core_log_stop(MemoryListener *listener,
3612 MemoryRegionSection *section)
3613{
3614}
3615
3616static void core_log_sync(MemoryListener *listener,
3617 MemoryRegionSection *section)
3618{
3619}
3620
3621static void core_log_global_start(MemoryListener *listener)
3622{
3623 cpu_physical_memory_set_dirty_tracking(1);
3624}
3625
3626static void core_log_global_stop(MemoryListener *listener)
3627{
3628 cpu_physical_memory_set_dirty_tracking(0);
3629}
3630
3631static void core_eventfd_add(MemoryListener *listener,
3632 MemoryRegionSection *section,
3633 bool match_data, uint64_t data, int fd)
3634{
3635}
3636
3637static void core_eventfd_del(MemoryListener *listener,
3638 MemoryRegionSection *section,
3639 bool match_data, uint64_t data, int fd)
3640{
3641}
3642
Avi Kivity50c1e142012-02-08 21:36:02 +02003643static void io_begin(MemoryListener *listener)
3644{
3645}
3646
3647static void io_commit(MemoryListener *listener)
3648{
3649}
3650
Avi Kivity4855d412012-02-08 21:16:05 +02003651static void io_region_add(MemoryListener *listener,
3652 MemoryRegionSection *section)
3653{
3654 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3655 section->offset_within_address_space, section->size);
3656 ioport_register(&section->mr->iorange);
3657}
3658
3659static void io_region_del(MemoryListener *listener,
3660 MemoryRegionSection *section)
3661{
3662 isa_unassign_ioport(section->offset_within_address_space, section->size);
3663}
3664
Avi Kivity50c1e142012-02-08 21:36:02 +02003665static void io_region_nop(MemoryListener *listener,
3666 MemoryRegionSection *section)
3667{
3668}
3669
Avi Kivity4855d412012-02-08 21:16:05 +02003670static void io_log_start(MemoryListener *listener,
3671 MemoryRegionSection *section)
3672{
3673}
3674
3675static void io_log_stop(MemoryListener *listener,
3676 MemoryRegionSection *section)
3677{
3678}
3679
3680static void io_log_sync(MemoryListener *listener,
3681 MemoryRegionSection *section)
3682{
3683}
3684
3685static void io_log_global_start(MemoryListener *listener)
3686{
3687}
3688
3689static void io_log_global_stop(MemoryListener *listener)
3690{
3691}
3692
3693static void io_eventfd_add(MemoryListener *listener,
3694 MemoryRegionSection *section,
3695 bool match_data, uint64_t data, int fd)
3696{
3697}
3698
3699static void io_eventfd_del(MemoryListener *listener,
3700 MemoryRegionSection *section,
3701 bool match_data, uint64_t data, int fd)
3702{
3703}
3704
Avi Kivity93632742012-02-08 16:54:16 +02003705static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003706 .begin = core_begin,
3707 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003708 .region_add = core_region_add,
3709 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003710 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003711 .log_start = core_log_start,
3712 .log_stop = core_log_stop,
3713 .log_sync = core_log_sync,
3714 .log_global_start = core_log_global_start,
3715 .log_global_stop = core_log_global_stop,
3716 .eventfd_add = core_eventfd_add,
3717 .eventfd_del = core_eventfd_del,
3718 .priority = 0,
3719};
3720
Avi Kivity4855d412012-02-08 21:16:05 +02003721static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003722 .begin = io_begin,
3723 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003724 .region_add = io_region_add,
3725 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003726 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003727 .log_start = io_log_start,
3728 .log_stop = io_log_stop,
3729 .log_sync = io_log_sync,
3730 .log_global_start = io_log_global_start,
3731 .log_global_stop = io_log_global_stop,
3732 .eventfd_add = io_eventfd_add,
3733 .eventfd_del = io_eventfd_del,
3734 .priority = 0,
3735};
3736
Avi Kivity62152b82011-07-26 14:26:14 +03003737static void memory_map_init(void)
3738{
Anthony Liguori7267c092011-08-20 22:09:37 -05003739 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003740 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003741 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003742
Anthony Liguori7267c092011-08-20 22:09:37 -05003743 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003744 memory_region_init(system_io, "io", 65536);
3745 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003746
Avi Kivity4855d412012-02-08 21:16:05 +02003747 memory_listener_register(&core_memory_listener, system_memory);
3748 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003749}
3750
3751MemoryRegion *get_system_memory(void)
3752{
3753 return system_memory;
3754}
3755
Avi Kivity309cb472011-08-08 16:09:03 +03003756MemoryRegion *get_system_io(void)
3757{
3758 return system_io;
3759}
3760
pbrooke2eef172008-06-08 01:09:01 +00003761#endif /* !defined(CONFIG_USER_ONLY) */
3762
bellard13eb76e2004-01-24 15:23:36 +00003763/* physical memory access (slow version, mainly for debug) */
3764#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003765int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3766 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003767{
3768 int l, flags;
3769 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003770 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003771
3772 while (len > 0) {
3773 page = addr & TARGET_PAGE_MASK;
3774 l = (page + TARGET_PAGE_SIZE) - addr;
3775 if (l > len)
3776 l = len;
3777 flags = page_get_flags(page);
3778 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003779 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003780 if (is_write) {
3781 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003782 return -1;
bellard579a97f2007-11-11 14:26:47 +00003783 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003784 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003785 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003786 memcpy(p, buf, l);
3787 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003788 } else {
3789 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003790 return -1;
bellard579a97f2007-11-11 14:26:47 +00003791 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003792 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003793 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003794 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003795 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003796 }
3797 len -= l;
3798 buf += l;
3799 addr += l;
3800 }
Paul Brooka68fe892010-03-01 00:08:59 +00003801 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003802}
bellard8df1cd02005-01-28 22:37:22 +00003803
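/* Illustrative sketch: the gdb stub reads and writes guest memory through
   this helper. A hypothetical caller fetching 16 bytes at pc would do:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0) < 0) {
           ... handle an unmapped or unreadable page ...
       }

   is_write = 0 selects the read direction. */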
bellard13eb76e2004-01-24 15:23:36 +00003804#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003805void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003806 int len, int is_write)
3807{
3808 int l, io_index;
3809 uint8_t *ptr;
3810 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003811 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003812 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003813 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003814
bellard13eb76e2004-01-24 15:23:36 +00003815 while (len > 0) {
3816 page = addr & TARGET_PAGE_MASK;
3817 l = (page + TARGET_PAGE_SIZE) - addr;
3818 if (l > len)
3819 l = len;
bellard92e873b2004-05-21 14:52:29 +00003820 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003821 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003822
bellard13eb76e2004-01-24 15:23:36 +00003823 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003824 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003825 target_phys_addr_t addr1;
Avi Kivity11c7ef02012-01-02 17:21:07 +02003826 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003827 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003828 /* XXX: could force cpu_single_env to NULL to avoid
3829 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003830 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003831 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003832 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003833 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003834 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003835 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003836 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003837 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003838 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003839 l = 2;
3840 } else {
bellard1c213d12005-09-03 10:49:04 +00003841 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003842 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003843 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003844 l = 1;
3845 }
3846 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003847 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003848 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003849 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003850 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003851 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003852 if (!cpu_physical_memory_is_dirty(addr1)) {
3853 /* invalidate code */
3854 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3855 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003856 cpu_physical_memory_set_dirty_flags(
3857 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003858 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003859 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003860 }
3861 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003862 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003863 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003864 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003865 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003866 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003867 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003868 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003869 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003870 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003871 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003872 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003873 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003874 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003875 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003876 l = 2;
3877 } else {
bellard1c213d12005-09-03 10:49:04 +00003878 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003879 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003880 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003881 l = 1;
3882 }
3883 } else {
3884 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003885 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3886 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3887 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003888 }
3889 }
3890 len -= l;
3891 buf += l;
3892 addr += l;
3893 }
3894}
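/* Illustrative sketch (not part of the original file): the page-chunking
   arithmetic above, in numbers.  With TARGET_PAGE_SIZE of 0x1000,
   addr = 0x1234 and len = 0x2000, the first iteration computes
   page = 0x1000 and l = (0x1000 + 0x1000) - 0x1234 = 0xdcc, so 0xdcc
   bytes are copied and the next iteration starts page-aligned at 0x2000.
   The helper below is hypothetical and only wraps the function above. */
#if 0
static void example_read_guest_phys(target_phys_addr_t gpa,
                                    uint8_t *dest, int size)
{
    /* is_write == 0: copy from guest physical memory into dest */
    cpu_physical_memory_rw(gpa, dest, size, 0);
}
#endif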
bellard8df1cd02005-01-28 22:37:22 +00003895
bellardd0ecd2a2006-04-23 17:14:48 +00003896/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003897void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003898 const uint8_t *buf, int len)
3899{
3900 int l;
3901 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003902 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003903 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003904 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003905
bellardd0ecd2a2006-04-23 17:14:48 +00003906 while (len > 0) {
3907 page = addr & TARGET_PAGE_MASK;
3908 l = (page + TARGET_PAGE_SIZE) - addr;
3909 if (l > len)
3910 l = len;
3911 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003912 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003913
Avi Kivity1d393fa2012-01-01 21:15:42 +02003914 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003915 /* do nothing */
3916 } else {
3917 unsigned long addr1;
3918 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3919 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003920 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003921 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003922 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003923 }
3924 len -= l;
3925 buf += l;
3926 addr += l;
3927 }
3928}
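/* Illustrative sketch (not part of the original file): a loader using the
   ROM writer above to install a firmware image; a plain
   cpu_physical_memory_rw() would silently skip the ROM pages.  The names
   below are hypothetical. */
#if 0
static void example_install_firmware(target_phys_addr_t rom_base,
                                     const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}
#endif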
3929
aliguori6d16c2f2009-01-22 16:59:11 +00003930typedef struct {
3931 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003932 target_phys_addr_t addr;
3933 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003934} BounceBuffer;
3935
3936static BounceBuffer bounce;
3937
aliguoriba223c22009-01-22 16:59:16 +00003938typedef struct MapClient {
3939 void *opaque;
3940 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003941 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003942} MapClient;
3943
Blue Swirl72cf2d42009-09-12 07:36:22 +00003944static QLIST_HEAD(map_client_list, MapClient) map_client_list
3945 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003946
3947void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3948{
Anthony Liguori7267c092011-08-20 22:09:37 -05003949 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003950
3951 client->opaque = opaque;
3952 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003953 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003954 return client;
3955}
3956
3957void cpu_unregister_map_client(void *_client)
3958{
3959 MapClient *client = (MapClient *)_client;
3960
Blue Swirl72cf2d42009-09-12 07:36:22 +00003961 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003962 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003963}
3964
3965static void cpu_notify_map_clients(void)
3966{
3967 MapClient *client;
3968
Blue Swirl72cf2d42009-09-12 07:36:22 +00003969 while (!QLIST_EMPTY(&map_client_list)) {
3970 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003971 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003972 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003973 }
3974}
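/* Illustrative sketch (not part of the original file): intended use of the
   map-client list above.  A device whose cpu_physical_memory_map() call
   returned NULL registers a callback and is notified once the bounce
   buffer frees up.  ExampleDev and its helpers are hypothetical. */
#if 0
static void example_dma_retry(void *opaque)
{
    ExampleDev *dev = opaque;
    example_dev_restart_transfer(dev);  /* retry the stalled DMA */
}

static void example_dma_stall(ExampleDev *dev)
{
    dev->map_client = cpu_register_map_client(dev, example_dma_retry);
}
#endif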
3975
aliguori6d16c2f2009-01-22 16:59:11 +00003976/* Map a physical memory region into a host virtual address.
3977 * May map a subset of the requested range, given by and returned in *plen.
3978 * May return NULL if resources needed to perform the mapping are exhausted.
3979 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003980 * Use cpu_register_map_client() to know when retrying the map operation is
3981 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003982 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003983void *cpu_physical_memory_map(target_phys_addr_t addr,
3984 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003985 int is_write)
3986{
Anthony Liguoric227f092009-10-01 16:12:16 -05003987 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003988 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003989 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003990 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003991 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003992 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003993 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003994 ram_addr_t rlen;
3995 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003996
3997 while (len > 0) {
3998 page = addr & TARGET_PAGE_MASK;
3999 l = (page + TARGET_PAGE_SIZE) - addr;
4000 if (l > len)
4001 l = len;
4002 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004003 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00004004
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004005 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004006 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00004007 break;
4008 }
4009 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4010 bounce.addr = addr;
4011 bounce.len = l;
4012 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02004013 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00004014 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004015
4016 *plen = l;
4017 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00004018 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004019 if (!todo) {
4020 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4021 }
aliguori6d16c2f2009-01-22 16:59:11 +00004022
4023 len -= l;
4024 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004025 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004026 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004027 rlen = todo;
4028 ret = qemu_ram_ptr_length(raddr, &rlen);
4029 *plen = rlen;
4030 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004031}
4032
4033/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4034 * Will also mark the memory as dirty if is_write == 1. access_len gives
4035 * the amount of memory that was actually read or written by the caller.
4036 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004037void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4038 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004039{
4040 if (buffer != bounce.buffer) {
4041 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004042 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004043 while (access_len) {
4044 unsigned l;
4045 l = TARGET_PAGE_SIZE;
4046 if (l > access_len)
4047 l = access_len;
4048 if (!cpu_physical_memory_is_dirty(addr1)) {
4049 /* invalidate code */
4050 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4051 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004052 cpu_physical_memory_set_dirty_flags(
4053 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004054 }
4055 addr1 += l;
4056 access_len -= l;
4057 }
4058 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004059 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004060 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004061 }
aliguori6d16c2f2009-01-22 16:59:11 +00004062 return;
4063 }
4064 if (is_write) {
4065 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4066 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004067 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004068 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004069 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004070}
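/* Illustrative sketch (not part of the original file): the map/unmap
   pairing the two functions above expect.  *plen may come back smaller
   than requested, and NULL means the single bounce buffer is in use, so
   callers must loop and be prepared to retry. */
#if 0
static void example_dma_to_guest(target_phys_addr_t gpa,
                                 const uint8_t *src,
                                 target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);
        if (!host) {
            break;  /* bounce buffer busy: register a map client, retry */
        }
        memcpy(host, src, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        gpa += plen;
        src += plen;
        size -= plen;
    }
}
#endif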
bellardd0ecd2a2006-04-23 17:14:48 +00004071
bellard8df1cd02005-01-28 22:37:22 +00004072/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004073static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4074 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004075{
4076 int io_index;
4077 uint8_t *ptr;
4078 uint32_t val;
4079 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004080 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004081
4082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004083 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004084
Avi Kivity1d393fa2012-01-01 21:15:42 +02004085 if (!is_ram_rom_romd(pd)) {
bellard8df1cd02005-01-28 22:37:22 +00004086 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004087 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004088 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004089 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004090#if defined(TARGET_WORDS_BIGENDIAN)
4091 if (endian == DEVICE_LITTLE_ENDIAN) {
4092 val = bswap32(val);
4093 }
4094#else
4095 if (endian == DEVICE_BIG_ENDIAN) {
4096 val = bswap32(val);
4097 }
4098#endif
bellard8df1cd02005-01-28 22:37:22 +00004099 } else {
4100 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004101 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004102 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004103 switch (endian) {
4104 case DEVICE_LITTLE_ENDIAN:
4105 val = ldl_le_p(ptr);
4106 break;
4107 case DEVICE_BIG_ENDIAN:
4108 val = ldl_be_p(ptr);
4109 break;
4110 default:
4111 val = ldl_p(ptr);
4112 break;
4113 }
bellard8df1cd02005-01-28 22:37:22 +00004114 }
4115 return val;
4116}
4117
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004118uint32_t ldl_phys(target_phys_addr_t addr)
4119{
4120 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4121}
4122
4123uint32_t ldl_le_phys(target_phys_addr_t addr)
4124{
4125 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4126}
4127
4128uint32_t ldl_be_phys(target_phys_addr_t addr)
4129{
4130 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4131}
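/* Illustrative sketch (not part of the original file): why the
   explicit-endian variants above exist.  A device with a defined byte
   order should use the matching accessor so the result is stable across
   host/target combinations; e.g. PCI registers are little-endian.  The
   helper name is hypothetical. */
#if 0
static uint32_t example_read_pci_reg(target_phys_addr_t reg_addr)
{
    return ldl_le_phys(reg_addr);   /* not ldl_phys(): PCI is LE */
}
#endif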
4132
bellard84b7b8e2005-11-28 21:19:04 +00004133/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004134static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4135 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004136{
4137 int io_index;
4138 uint8_t *ptr;
4139 uint64_t val;
4140 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004141 PhysPageDesc p;
bellard84b7b8e2005-11-28 21:19:04 +00004142
4143 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004144 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004145
Avi Kivity1d393fa2012-01-01 21:15:42 +02004146 if (!is_ram_rom_romd(pd)) {
bellard84b7b8e2005-11-28 21:19:04 +00004147 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004148 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004149 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004150
4151 /* XXX: this is broken when the device endianness differs from the
4152 CPU endianness; fix it and honour the "endian" argument. */
bellard84b7b8e2005-11-28 21:19:04 +00004153#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004154 val = io_mem_read(io_index, addr, 4) << 32;
4155 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004156#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004157 val = io_mem_read(io_index, addr, 4);
4158 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004159#endif
4160 } else {
4161 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004162 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004163 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004164 switch (endian) {
4165 case DEVICE_LITTLE_ENDIAN:
4166 val = ldq_le_p(ptr);
4167 break;
4168 case DEVICE_BIG_ENDIAN:
4169 val = ldq_be_p(ptr);
4170 break;
4171 default:
4172 val = ldq_p(ptr);
4173 break;
4174 }
bellard84b7b8e2005-11-28 21:19:04 +00004175 }
4176 return val;
4177}
4178
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004179uint64_t ldq_phys(target_phys_addr_t addr)
4180{
4181 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4182}
4183
4184uint64_t ldq_le_phys(target_phys_addr_t addr)
4185{
4186 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4187}
4188
4189uint64_t ldq_be_phys(target_phys_addr_t addr)
4190{
4191 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4192}
4193
bellardaab33092005-10-30 20:48:42 +00004194/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004195uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004196{
4197 uint8_t val;
4198 cpu_physical_memory_read(addr, &val, 1);
4199 return val;
4200}
4201
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004202/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004203static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4204 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004205{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004206 int io_index;
4207 uint8_t *ptr;
4208 uint64_t val;
4209 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004210 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004211
4212 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004213 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004214
Avi Kivity1d393fa2012-01-01 21:15:42 +02004215 if (!is_ram_rom_romd(pd)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004216 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004217 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004218 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004219 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004220#if defined(TARGET_WORDS_BIGENDIAN)
4221 if (endian == DEVICE_LITTLE_ENDIAN) {
4222 val = bswap16(val);
4223 }
4224#else
4225 if (endian == DEVICE_BIG_ENDIAN) {
4226 val = bswap16(val);
4227 }
4228#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004229 } else {
4230 /* RAM case */
4231 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4232 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004233 switch (endian) {
4234 case DEVICE_LITTLE_ENDIAN:
4235 val = lduw_le_p(ptr);
4236 break;
4237 case DEVICE_BIG_ENDIAN:
4238 val = lduw_be_p(ptr);
4239 break;
4240 default:
4241 val = lduw_p(ptr);
4242 break;
4243 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004244 }
4245 return val;
bellardaab33092005-10-30 20:48:42 +00004246}
4247
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004248uint32_t lduw_phys(target_phys_addr_t addr)
4249{
4250 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4251}
4252
4253uint32_t lduw_le_phys(target_phys_addr_t addr)
4254{
4255 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4256}
4257
4258uint32_t lduw_be_phys(target_phys_addr_t addr)
4259{
4260 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4261}
4262
bellard8df1cd02005-01-28 22:37:22 +00004263/* warning: addr must be aligned. The ram page is not marked as dirty
4264 and the code inside is not invalidated. It is useful if the dirty
4265 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004266void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004267{
4268 int io_index;
4269 uint8_t *ptr;
4270 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004271 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004272
4273 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004274 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004275
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004276 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004277 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004278 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004279 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004280 } else {
aliguori74576192008-10-06 14:02:03 +00004281 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004282 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004283 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004284
4285 if (unlikely(in_migration)) {
4286 if (!cpu_physical_memory_is_dirty(addr1)) {
4287 /* invalidate code */
4288 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4289 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004290 cpu_physical_memory_set_dirty_flags(
4291 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004292 }
4293 }
bellard8df1cd02005-01-28 22:37:22 +00004294 }
4295}
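/* Illustrative sketch (not part of the original file): the use case named
   in the comment above.  When MMU emulation sets an accessed/dirty bit in
   a guest PTE, storing through the _notdirty variant keeps QEMU's own
   dirty tracking of that page unperturbed.  The PTE bit below is
   hypothetical. */
#if 0
static void example_pte_set_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x40 /* hypothetical D bit */);
}
#endif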
4296
Anthony Liguoric227f092009-10-01 16:12:16 -05004297void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004298{
4299 int io_index;
4300 uint8_t *ptr;
4301 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004302 PhysPageDesc p;
j_mayerbc98a7e2007-04-04 07:55:12 +00004303
4304 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004305 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004306
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004307 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004308 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004309 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004310#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004311 io_mem_write(io_index, addr, val >> 32, 4);
4312 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004313#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004314 io_mem_write(io_index, addr, (uint32_t)val, 4);
4315 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004316#endif
4317 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004318 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004319 (addr & ~TARGET_PAGE_MASK);
4320 stq_p(ptr, val);
4321 }
4322}
4323
bellard8df1cd02005-01-28 22:37:22 +00004324/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004325static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4326 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004327{
4328 int io_index;
4329 uint8_t *ptr;
4330 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004331 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004332
4333 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004334 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004335
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004336 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004337 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004338 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004339#if defined(TARGET_WORDS_BIGENDIAN)
4340 if (endian == DEVICE_LITTLE_ENDIAN) {
4341 val = bswap32(val);
4342 }
4343#else
4344 if (endian == DEVICE_BIG_ENDIAN) {
4345 val = bswap32(val);
4346 }
4347#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004348 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004349 } else {
4350 unsigned long addr1;
4351 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4352 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004353 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004354 switch (endian) {
4355 case DEVICE_LITTLE_ENDIAN:
4356 stl_le_p(ptr, val);
4357 break;
4358 case DEVICE_BIG_ENDIAN:
4359 stl_be_p(ptr, val);
4360 break;
4361 default:
4362 stl_p(ptr, val);
4363 break;
4364 }
bellard3a7d9292005-08-21 09:26:42 +00004365 if (!cpu_physical_memory_is_dirty(addr1)) {
4366 /* invalidate code */
4367 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4368 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004369 cpu_physical_memory_set_dirty_flags(addr1,
4370 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004371 }
bellard8df1cd02005-01-28 22:37:22 +00004372 }
4373}
4374
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004375void stl_phys(target_phys_addr_t addr, uint32_t val)
4376{
4377 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4378}
4379
4380void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4381{
4382 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4383}
4384
4385void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4386{
4387 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4388}
4389
bellardaab33092005-10-30 20:48:42 +00004390/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004391void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004392{
4393 uint8_t v = val;
4394 cpu_physical_memory_write(addr, &v, 1);
4395}
4396
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004397/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004398static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4399 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004400{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004401 int io_index;
4402 uint8_t *ptr;
4403 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004404 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004405
4406 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004407 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004408
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004409 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004410 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004411 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004412#if defined(TARGET_WORDS_BIGENDIAN)
4413 if (endian == DEVICE_LITTLE_ENDIAN) {
4414 val = bswap16(val);
4415 }
4416#else
4417 if (endian == DEVICE_BIG_ENDIAN) {
4418 val = bswap16(val);
4419 }
4420#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004421 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004422 } else {
4423 unsigned long addr1;
4424 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4425 /* RAM case */
4426 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004427 switch (endian) {
4428 case DEVICE_LITTLE_ENDIAN:
4429 stw_le_p(ptr, val);
4430 break;
4431 case DEVICE_BIG_ENDIAN:
4432 stw_be_p(ptr, val);
4433 break;
4434 default:
4435 stw_p(ptr, val);
4436 break;
4437 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004438 if (!cpu_physical_memory_is_dirty(addr1)) {
4439 /* invalidate code */
4440 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4441 /* set dirty bit */
4442 cpu_physical_memory_set_dirty_flags(addr1,
4443 (0xff & ~CODE_DIRTY_FLAG));
4444 }
4445 }
bellardaab33092005-10-30 20:48:42 +00004446}
4447
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004448void stw_phys(target_phys_addr_t addr, uint32_t val)
4449{
4450 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4451}
4452
4453void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4454{
4455 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4456}
4457
4458void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4459{
4460 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4461}
4462
bellardaab33092005-10-30 20:48:42 +00004463/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004464void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004465{
4466 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004467 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004468}
4469
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004470void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4471{
4472 val = cpu_to_le64(val);
4473 cpu_physical_memory_write(addr, &val, 8);
4474}
4475
4476void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4477{
4478 val = cpu_to_be64(val);
4479 cpu_physical_memory_write(addr, &val, 8);
4480}
4481
aliguori5e2972f2009-03-28 17:51:36 +00004482/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004483int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004484 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004485{
4486 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004487 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004488 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004489
4490 while (len > 0) {
4491 page = addr & TARGET_PAGE_MASK;
4492 phys_addr = cpu_get_phys_page_debug(env, page);
4493 /* if no physical page mapped, return an error */
4494 if (phys_addr == -1)
4495 return -1;
4496 l = (page + TARGET_PAGE_SIZE) - addr;
4497 if (l > len)
4498 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004499 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004500 if (is_write)
4501 cpu_physical_memory_write_rom(phys_addr, buf, l);
4502 else
aliguori5e2972f2009-03-28 17:51:36 +00004503 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004504 len -= l;
4505 buf += l;
4506 addr += l;
4507 }
4508 return 0;
4509}
Paul Brooka68fe892010-03-01 00:08:59 +00004510#endif
bellard13eb76e2004-01-24 15:23:36 +00004511
pbrook2e70f6e2008-06-29 01:03:05 +00004512/* in deterministic execution mode, instructions that perform device I/O
4513 must be at the end of the TB */
4514void cpu_io_recompile(CPUState *env, void *retaddr)
4515{
4516 TranslationBlock *tb;
4517 uint32_t n, cflags;
4518 target_ulong pc, cs_base;
4519 uint64_t flags;
4520
4521 tb = tb_find_pc((unsigned long)retaddr);
4522 if (!tb) {
4523 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4524 retaddr);
4525 }
4526 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004527 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004528 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004529 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004530 n = n - env->icount_decr.u16.low;
4531 /* Generate a new TB ending on the I/O insn. */
4532 n++;
4533 /* On MIPS and SH, delay slot instructions can only be restarted if
4534 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004535 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004536 branch. */
4537#if defined(TARGET_MIPS)
4538 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4539 env->active_tc.PC -= 4;
4540 env->icount_decr.u16.low++;
4541 env->hflags &= ~MIPS_HFLAG_BMASK;
4542 }
4543#elif defined(TARGET_SH4)
4544 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4545 && n > 1) {
4546 env->pc -= 2;
4547 env->icount_decr.u16.low++;
4548 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4549 }
4550#endif
4551 /* This should never happen. */
4552 if (n > CF_COUNT_MASK)
4553 cpu_abort(env, "TB too big during recompile");
4554
4555 cflags = n | CF_LAST_IO;
4556 pc = tb->pc;
4557 cs_base = tb->cs_base;
4558 flags = tb->flags;
4559 tb_phys_invalidate(tb, -1);
4560 /* FIXME: In theory this could raise an exception. In practice
4561 we have already translated the block once so it's probably ok. */
4562 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004563 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004564 the first in the TB) then we end up generating a whole new TB and
4565 repeating the fault, which is horribly inefficient.
4566 Better would be to execute just this insn uncached, or generate a
4567 second new TB. */
4568 cpu_resume_from_signal(env, NULL);
4569}
4570
Paul Brookb3755a92010-03-12 16:54:58 +00004571#if !defined(CONFIG_USER_ONLY)
4572
Stefan Weil055403b2010-10-22 23:03:32 +02004573void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004574{
4575 int i, target_code_size, max_target_code_size;
4576 int direct_jmp_count, direct_jmp2_count, cross_page;
4577 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004578
bellarde3db7222005-01-26 22:00:47 +00004579 target_code_size = 0;
4580 max_target_code_size = 0;
4581 cross_page = 0;
4582 direct_jmp_count = 0;
4583 direct_jmp2_count = 0;
4584 for(i = 0; i < nb_tbs; i++) {
4585 tb = &tbs[i];
4586 target_code_size += tb->size;
4587 if (tb->size > max_target_code_size)
4588 max_target_code_size = tb->size;
4589 if (tb->page_addr[1] != -1)
4590 cross_page++;
4591 if (tb->tb_next_offset[0] != 0xffff) {
4592 direct_jmp_count++;
4593 if (tb->tb_next_offset[1] != 0xffff) {
4594 direct_jmp2_count++;
4595 }
4596 }
4597 }
4598 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004599 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004600 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004601 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4602 cpu_fprintf(f, "TB count %d/%d\n",
4603 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004604 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004605 nb_tbs ? target_code_size / nb_tbs : 0,
4606 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004607 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004608 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4609 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004610 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4611 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004612 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4613 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004614 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004615 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4616 direct_jmp2_count,
4617 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004618 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004619 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4620 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4621 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004622 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004623}
4624
Avi Kivityd39e8222012-01-01 23:35:10 +02004625/* NOTE: this function can trigger an exception */
4626/* NOTE2: the returned address is not exactly the physical address: it
4627 is a ram_addr_t offset into guest RAM (formerly relative to phys_ram_base) */
4628tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4629{
4630 int mmu_idx, page_index, pd;
4631 void *p;
4632
4633 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4634 mmu_idx = cpu_mmu_index(env1);
4635 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4636 (addr & TARGET_PAGE_MASK))) {
4637 ldub_code(addr);
4638 }
4639 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004640 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity75c578d2012-01-02 15:40:52 +02004641 && !is_romd(pd)) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004642#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4643 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4644#else
4645 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4646#endif
4647 }
4648 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4649 return qemu_ram_addr_from_host_nofail(p);
4650}
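/* Illustrative note (not from the original file): the addend arithmetic
   above.  Each TLB entry caches host_ptr - guest_vaddr, so

       host = (void *)((uintptr_t)guest_vaddr + entry.addend);

   lands directly on the host page, and qemu_ram_addr_from_host_nofail()
   translates that host pointer back into a ram_addr_t offset. */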
4651
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004652/*
4653 * A helper function for the _utterly broken_ virtio device model to find out if
4654 * it's running on a big endian machine. Don't do this at home kids!
4655 */
4656bool virtio_is_big_endian(void);
4657bool virtio_is_big_endian(void)
4658{
4659#if defined(TARGET_WORDS_BIGENDIAN)
4660 return true;
4661#else
4662 return false;
4663#endif
4664}
4665
bellard61382a52003-10-27 21:22:23 +00004666#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004667#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004668#define GETPC() NULL
4669#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004670#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004671
4672#define SHIFT 0
4673#include "softmmu_template.h"
4674
4675#define SHIFT 1
4676#include "softmmu_template.h"
4677
4678#define SHIFT 2
4679#include "softmmu_template.h"
4680
4681#define SHIFT 3
4682#include "softmmu_template.h"
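/* Illustrative note (not from the original file): each inclusion above
   instantiates the helpers for one access size of 1 << SHIFT bytes --
   SHIFT 0/1/2/3 for the b/w/l/q variants -- and, because
   SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu are defined here, the generated
   functions are the code-fetch (_cmmu) flavour. */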
4683
4684#undef env
4685
4686#endif