/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
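
/*
 * The physical page map is a radix tree: a PhysPageEntry either points at
 * another node of L2_SIZE entries (is_leaf == 0) or holds a phys_sections[]
 * index directly (is_leaf == 1).  Nodes live in the phys_map_nodes pool
 * above and are referenced by pool index, with PHYS_MAP_NODE_NIL (0x7fff)
 * as the "no node" sentinel.  Each level of a walk consumes L2_BITS of the
 * physical page number, starting from the most significant bits.
 */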

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

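/*
 * Map [*index, *index + *nb) pages to section `leaf`.  At each level a
 * fully aligned, step-sized run is stored directly in the current entry;
 * anything smaller recurses one level down, allocating intermediate nodes
 * (pre-filled with phys_section_unassigned) on demand.
 */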
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

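/* Illustrative sketch only (not code from this file): a debugger front end
   watching four bytes for writes would do something like

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) < 0) {
           // len not a power of two, or addr unaligned
       }

   BP_MEM_WRITE is assumed to be defined alongside BP_MEM_READ in
   cpu-defs.h. */
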
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

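/* Illustrative sketch only: the gdbstub inserts breakpoints roughly as

       cpu_breakpoint_insert(env, addr, BP_GDB, NULL);

   passing NULL because it does not need the CPUBreakpoint back; a negative
   return means the target lacks breakpoint support (-ENOSYS here). */
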
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* The memcpy above copied env's still-live list heads into new_env;
       reinitialize new_env's lists before cloning env's entries into them. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
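
/* A subpage covers a single guest page whose bytes map to more than one
   section; sub_section[] holds a section index per byte offset within the
   page (see SUBPAGE_IDX), so dispatch costs one extra table lookup. */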

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

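/*
 * Split a MemoryRegionSection into dispatch entries: page-unaligned head
 * and tail pieces go through subpages, while a run that is page aligned
 * both in the address space and within its region is registered as one
 * multipage range.  E.g. with 4K pages, a section at 0x2000 of size
 * 0x3200 becomes full pages for 0x2000-0x4fff plus a subpage tail for
 * 0x5000-0x51ff.
 */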
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

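/*
 * Back a RAMBlock with a file on a hugetlbfs mount (-mem-path).  The
 * backing file is created with mkstemp() and unlinked immediately, so it
 * lives only as long as the mapping.  Returns NULL on any failure so the
 * caller can fall back to anonymous memory.
 */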
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

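/*
 * Best-fit search over the gaps between existing RAMBlocks: for each
 * block, find the nearest block that starts above its end; the smallest
 * gap that still fits `size` wins, keeping the ram_addr_t space compact.
 */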
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

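/*
 * Register a new RAMBlock of `size` bytes.  The backing memory is, in
 * order of preference: the caller-supplied `host` pointer, a hugetlbfs
 * file (-mem-path), a Xen or s390/KVM specific allocation, or plain
 * qemu_vmalloc().  Returns the block's offset in the ram_addr_t space.
 */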
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
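
/* Illustrative sketch only: this is approximately how memory.c obtains
   backing storage when a RAM MemoryRegion is created:

       mr->ram_addr = qemu_ram_alloc(size, mr);

   the returned ram_addr_t offset can then be turned into a host pointer
   with qemu_get_ram_ptr(). */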
bellarde9a1ab12007-02-08 23:08:38 +00001109
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

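/*
 * Sketch of the usage contract documented above (illustrative; the
 * helper and its arguments are made up): a device that owns a RAM block
 * may keep a direct host pointer within the block's bounds, but must
 * never use such a pointer for guest-driven DMA.
 */
static void example_vram_clear(ram_addr_t vram_offset, size_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    /* Safe only because the device knows vram_size fits in its block. */
    memset(vram, 0, vram_size);
}
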
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

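/*
 * Sketch (illustrative; the helper is not a real QEMU function): the
 * translation above is the inverse of qemu_get_ram_ptr(), which softmmu
 * code relies on when converting a host pointer found in a TLB entry
 * back into a ram_addr_t.
 */
static bool example_round_trip_ok(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    return qemu_ram_addr_from_host_nofail(host) == addr;
}
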
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

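/*
 * Sketch of how a debugger front end arms the machinery above
 * (illustrative; "example_arm_watchpoint" is not a real QEMU function).
 * Inserting a watchpoint retunes the TLB so that accesses to the page
 * are funnelled through watch_mem_read()/watch_mem_write().
 */
static int example_arm_watchpoint(CPUArchState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* 4-byte write watchpoint; BP_GDB marks it as owned by the gdbstub. */
    return cpu_watchpoint_insert(env, vaddr, 4, BP_GDB | BP_MEM_WRITE, &wp);
}
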
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

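/*
 * Sketch of what the two helpers above accomplish together (the helper
 * name and offsets are illustrative assumptions): when a small device
 * region starts mid-page, a subpage container routes only the covered
 * offsets to the device's section and leaves the rest unassigned.
 */
static subpage_t *example_map_small_region(hwaddr page_base,
                                           uint16_t device_section)
{
    subpage_t *sp = subpage_init(page_base);

    /* Route only offsets 0x800..0x8ff within the page to the device. */
    subpage_register(sp, 0x800, 0x8ff, device_section);
    return sp;
}
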
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

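/*
 * Sketch (illustrative; callback and variable names are made up): any
 * subsystem that needs to observe guest memory-map changes registers
 * its own MemoryListener, exactly as the core/io/tcg listeners are
 * registered in memory_map_init() above.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* React to a section becoming visible in the address space. */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void example_listener_init(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
}
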
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred, in bytes
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

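/*
 * Usage sketch (illustrative; the helper and the little-endian guest
 * assumption are not from this file): reading a guest-physical 32-bit
 * value through the dispatcher above.  address_space_rw() splits the
 * access at page boundaries and routes each fragment to RAM or MMIO.
 */
static uint32_t example_read_guest_u32(hwaddr gpa)
{
    uint32_t val;

    address_space_rw(&address_space_memory, gpa, (uint8_t *)&val,
                     sizeof(val), false);
    return le32_to_cpu(val);
}
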
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

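/*
 * Sketch of the canonical map/touch/unmap pattern the comments above
 * describe (illustrative; a real caller would also handle the bounce
 * buffer being busy by registering a map client and retrying).
 */
static bool example_dma_fill(hwaddr gpa, hwaddr len, uint8_t pattern)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host || plen < len) {
        /* Partial mapping: give it back untouched and report failure. */
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        return false;
    }
    memset(host, pattern, len);
    /* access_len == len: exactly this much was written and needs to be
       marked dirty (and code over it invalidated) by unmap. */
    cpu_physical_memory_unmap(host, plen, 1, len);
    return true;
}
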
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

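/*
 * Sketch (illustrative; the descriptor layout is made up): device code
 * reading a field that the guest stores little-endian uses the
 * explicit-endian accessor, so the result is host-order regardless of
 * TARGET_WORDS_BIGENDIAN.
 */
static uint32_t example_read_desc_field(hwaddr desc_pa)
{
    return ldl_le_phys(desc_pa + 8 /* hypothetical field offset */);
}
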
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

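/* A likely caller of the "notdirty" stores below (an inference from the
 * comment that follows, not spelled out here): a target's page-table
 * walker setting accessed/dirty bits in a guest PTE, where dirtying the
 * page itself would defeat the PTE-based tracking. */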
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

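/* 64-bit "notdirty" store: on the MMIO path the value is split into two
 * 32-bit writes, high word first on big-endian targets and low word first
 * otherwise; the RAM path stores the whole quadword with stq_p(). */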
void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

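/* 32-bit store, mirroring the loads above: MMIO writes are byte-swapped up
 * front when the requested endianness differs from the target's, while RAM
 * writes go through stl_{le,be}_p() and then invalidate_and_set_dirty() so
 * stale translated code is dropped and the dirty bitmap stays accurate. */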
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

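/* 16-bit store: structurally identical to stl_phys_internal() above, with
 * bswap16(), stw_{le,be}_p() and a 2-byte invalidate/dirty window. */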
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

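/* The 64-bit stores below take the generic path: the value is first
 * converted to the desired byte order (tswap64() for target-native,
 * cpu_to_le64()/cpu_to_be64() otherwise) and then written out as a plain
 * 8-byte buffer, so no alignment requirement applies. */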
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

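/* The debug accessor below walks the virtual range page by page: each
 * iteration translates one page via cpu_get_phys_page_debug(), clamps the
 * chunk at the page boundary, and writes through
 * cpu_physical_memory_write_rom() so that a debugger can patch ROM
 * (for instance, to plant a breakpoint). */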
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
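/* True when the page backing phys_addr is neither RAM nor a ROM device in
 * directly-readable ("ROMD") mode, i.e. accesses there are serviced by
 * MMIO callbacks rather than host memory. */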
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif