/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

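/* The physical page map is a radix tree over physical page numbers.  Each
 * interior node has L2_SIZE entries covering L2_BITS bits of the page index;
 * an entry either points at a child node (an index into phys_map_nodes) or,
 * when is_leaf is set, holds a uint16_t index into phys_sections.
 * phys_page_set_level() descends the tree and marks a whole aligned subtree
 * as a leaf whenever a step-sized run of pages remains, so large mappings
 * need only a handful of entries (illustratively, if L2_BITS were 9, a
 * single level-1 leaf would cover a 512-page run).
 */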
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

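/* Lookup mirrors phys_page_set_level(), but iteratively: consume L2_BITS of
 * the page index per level until a leaf is reached.  A NIL pointer at any
 * level means the page was never mapped, so the walk falls back to the
 * shared "unassigned" section; it takes at most P_L2_LEVELS steps and never
 * fails outright.
 */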
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return cpu;
}

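/* CPUs are kept on a singly-linked list threaded through env->next_cpu.
 * cpu_exec_init() walks to the tail, which yields both the next free
 * cpu_index and the link pointer through which the new CPU is appended.
 */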
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
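
/* Example (illustrative, not from this file): watch 4 bytes at guest
 * virtual address 0x1000 for writes.  len must be a power of two and addr
 * must be len-aligned, or the call fails with -EINVAL:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp) < 0) {
 *         ... report the error ...
 *     }
 */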

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(env, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
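
/* Example (illustrative, not from this file): plant a GDB-style breakpoint
 * at pc and later drop it by reference:
 *
 *     CPUBreakpoint *bp;
 *     cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
 *     ...
 *     cpu_breakpoint_remove_by_ref(env, bp);
 */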

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

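/* The value built below becomes the TLB entry's "iotlb".  For RAM it is the
 * page's ram_addr_t OR'ed with a dirty-tracking section index (notdirty or
 * rom); for MMIO it is the section's index in phys_sections plus the offset
 * within the region, letting the I/O dispatch code recover the
 * MemoryRegionSection later.
 */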
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
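
/* A subpage_t models a single guest page that is split among several memory
 * regions: sub_section[] holds one phys_sections index per byte offset
 * within the page, and SUBPAGE_IDX() extracts that offset from an address.
 */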

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

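/* mem_add() splits an arbitrary section into up to three parts: an unaligned
 * head registered through a subpage, a run of whole aligned pages registered
 * directly, and an unaligned tail registered through a subpage again.  Pages
 * whose offset_within_region is not page-aligned also fall back to subpages,
 * one page at a time.
 */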
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6
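
/* f_type reported by statfs() for hugetlbfs mounts; matches the Linux
   kernel's HUGETLBFS_MAGIC. */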

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

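/* Best-fit search over the gaps between existing RAM blocks: for each block
 * end, the nearest following block start bounds a candidate gap, and the
 * smallest gap that still fits the requested size wins, keeping the RAM
 * address space densely packed.
 */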
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

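/* QEMU_MADV_MERGEABLE maps to madvise(MADV_MERGEABLE) on Linux, opting the
 * range into KSM (kernel samepage merging).  On hosts without the advice
 * the call simply returns an error, which callers here ignore.
 */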
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
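
/* Example (illustrative, not from this file): device RAM normally reaches
 * qemu_ram_alloc() through the memory API rather than directly, e.g.
 *
 *     memory_region_init_ram(&s->vram, "vga.vram", vram_size);
 *
 * which allocates its backing storage via qemu_ram_alloc() internally.
 */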

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

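/* qemu_ram_remap() discards a range of a RAM block and maps fresh pages at
 * the same host virtual address, e.g. so a guest can be resumed after the
 * host reports a hardware memory error (hwpoison) on the old pages.
 */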
Huang Yingcd19cfa2011-03-02 08:56:19 +01001158#ifndef _WIN32
1159void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1160{
1161 RAMBlock *block;
1162 ram_addr_t offset;
1163 int flags;
1164 void *area, *vaddr;
1165
Paolo Bonzinia3161032012-11-14 15:54:48 +01001166 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001167 offset = addr - block->offset;
1168 if (offset < block->length) {
1169 vaddr = block->host + offset;
1170 if (block->flags & RAM_PREALLOC_MASK) {
1171 ;
1172 } else {
1173 flags = MAP_FIXED;
1174 munmap(vaddr, length);
1175 if (mem_path) {
1176#if defined(__linux__) && !defined(TARGET_S390X)
1177 if (block->fd) {
1178#ifdef MAP_POPULATE
1179 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1180 MAP_PRIVATE;
1181#else
1182 flags |= MAP_PRIVATE;
1183#endif
1184 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1185 flags, block->fd, offset);
1186 } else {
1187 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1188 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1189 flags, -1, 0);
1190 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001191#else
1192 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001193#endif
1194 } else {
1195#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1196 flags |= MAP_SHARED | MAP_ANONYMOUS;
1197 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1198 flags, -1, 0);
1199#else
1200 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1201 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1202 flags, -1, 0);
1203#endif
1204 }
1205 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001206 fprintf(stderr, "Could not remap addr: "
1207 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001208 length, addr);
1209 exit(1);
1210 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001211 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001212 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001213 }
1214 return;
1215 }
1216 }
1217}
1218#endif /* !_WIN32 */
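
/*
 * Sketch of the intended caller (an assumption based on the KVM hwpoison
 * recovery path, which lives outside this file): after the kernel reports
 * a poisoned guest page via SIGBUS, the handler can discard the damaged
 * mapping and re-create an empty one at the same host virtual address:
 *
 *     ram_addr_t ram_addr;
 *     if (qemu_ram_addr_from_host(poisoned_ptr, &ram_addr) == 0) {
 *         qemu_ram_remap(ram_addr, TARGET_PAGE_SIZE);
 *     }
 */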

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
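
/*
 * Illustrative round trip (a sketch, not code from this file): the two
 * translations are inverses for any address inside a mapped block:
 *
 *     ram_addr_t off = ...;                    // offset of some guest RAM
 *     void *host = qemu_get_ram_ptr(off);
 *     assert(qemu_ram_addr_from_host_nofail(host) == off);
 *
 * The _nofail variant aborts on unknown pointers, so it is only safe for
 * pointers known to point into guest RAM (e.g. TLB entries).
 */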

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
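
/*
 * For context (a sketch; the insertion API lives elsewhere): the
 * watchpoints checked above are installed with cpu_watchpoint_insert(),
 * e.g. from the gdbstub:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, len, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * Inserting a watchpoint forces accesses to that page onto the slow path
 * through the watch_mem_* handlers below.
 */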

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    /* The debug printf used to reference an undefined "memory" variable;
       print the section index actually passed in instead. */
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
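
/*
 * How subpages arise (a sketch, not code from this file): when a region
 * is mapped with sub-page granularity, e.g.
 *
 *     memory_region_init_io(&dev->iomem, &dev_ops, dev, "dev", 0x40);
 *     memory_region_add_subregion(get_system_memory(), 0x10000010,
 *                                 &dev->iomem);
 *
 * the dispatch-building code calls subpage_init() and subpage_register()
 * for the partially covered page, so each sub-range dispatches to its own
 * MemoryRegionSection. (dev, dev_ops and the addresses are hypothetical.)
 */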

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    /* The debug printf used to reference an undefined "subpage_memory"
       variable; drop that argument so DEBUG_SUBPAGE builds compile. */
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
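
/*
 * Typical board-code usage (a sketch, assuming a flat machine layout):
 * everything visible to the CPU hangs off the region returned by
 * get_system_memory(), e.g.
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     memory_region_add_subregion(sysmem, 0x00000000, machine_ram);
 *     memory_region_add_subregion(sysmem, 0x80000000, device_mmio);
 *
 * where machine_ram and device_mmio are MemoryRegions initialized by the
 * caller.
 */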

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
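
/*
 * Example caller (a sketch, not from this file): a DMA-capable device
 * model pushing a buffer into guest memory through the memory address
 * space:
 *
 *     uint8_t data[64];
 *     ...
 *     address_space_rw(&address_space_memory, guest_paddr, data,
 *                      sizeof(data), true);
 *
 * address_space_write() below is the const-correct wrapper for the write
 * direction.
 */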

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
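
/*
 * Retry pattern (a sketch; continue_dma_cb, MyDeviceState and
 * my_device_restart_dma are hypothetical): when address_space_map() fails
 * because the single bounce buffer is in use, a device can ask to be
 * notified once it frees up:
 *
 *     static void continue_dma_cb(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         my_device_restart_dma(s);   // calls address_space_map() again
 *     }
 *
 *     cpu_register_map_client(s, continue_dma_cb);
 *
 * As cpu_notify_map_clients() above shows, the client is unregistered
 * automatically after its callback fires.
 */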

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
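
/*
 * Typical zero-copy usage (a sketch, not from this file): map, use,
 * unmap, and cope with partial mappings:
 *
 *     hwaddr plen = size;
 *     void *p = cpu_physical_memory_map(paddr, &plen, 1);
 *     if (p) {
 *         memcpy(p, data, plen);              // use at most plen bytes
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 *
 * Callers must not assume plen == size on return, and must pass the
 * amount actually accessed as access_len when unmapping.
 */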

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
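
/*
 * Choosing a variant (illustrative; desc_paddr is a hypothetical guest
 * address): the _le/_be accessors fix the byte order regardless of the
 * target's native endianness, e.g. reading a 32-bit big-endian descriptor
 * field:
 *
 *     uint32_t next = ldl_be_phys(desc_paddr + 4);
 *
 * whereas ldl_phys() follows TARGET_WORDS_BIGENDIAN.
 */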

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
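
/*
 * Illustrative sketch (not part of the original file): a software-walked
 * MMU could use stl_phys_notdirty() to set the accessed bit of a guest
 * PTE without flagging the page as dirty or invalidating translated code,
 * where pte_addr and PG_ACCESSED_MASK are stand-ins for a target's own
 * page-table layout:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */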

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
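
/*
 * Note that the I/O branch of stq_phys_notdirty() splits the 64-bit
 * value into two 32-bit writes in target byte order, so a 64-bit store
 * to a device register is not atomic on this path.
 */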

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}
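
/*
 * Unlike the _notdirty variants above, the plain stores finish with
 * invalidate_and_set_dirty(), which throws away any translated code
 * covering the written bytes and marks the page dirty.
 */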

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
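
/*
 * Illustrative sketch (not part of the original file): a device model
 * could publish a 32-bit completion flag into a little-endian guest
 * structure, with completion_addr standing in for a real guest-physical
 * address:
 *
 *     stl_le_phys(completion_addr, 1);
 */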

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
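
/*
 * The 64-bit stores byte-swap into a local variable and then reuse the
 * generic cpu_physical_memory_write() path, which also copes with
 * unaligned and page-crossing addresses; the XXX above flags that they
 * lack the fast aligned-RAM path of the 16- and 32-bit helpers.
 */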

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
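
/*
 * Illustrative sketch (not part of the original file): a gdbstub-style
 * memory command could use cpu_memory_rw_debug() to read guest-virtual
 * memory one request at a time, with env and vaddr standing in for the
 * debugger's current CPU state and the requested guest-virtual address:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         // report the unmapped address to the debugger
 *     }
 */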
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
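
/* The self-declaration just above the definition is presumably there to
   keep missing-prototype warnings quiet, since this helper is not
   declared in any header. */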

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
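
/*
 * Illustrative sketch (not part of the original file): guest-memory dump
 * code could use this predicate to skip device-backed pages, with paddr
 * standing in for a page's guest-physical address:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         // include the page in the dump
 *     }
 */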
#endif