/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to reach the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
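
/* A worked example of the level arithmetic (assuming 4 KiB target pages,
 * i.e. TARGET_PAGE_BITS == 12): P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 == 6,
 * so a 64-bit physical address is resolved through up to six tables of
 * P_L2_SIZE == 512 entries each, indexed by successive 9-bit slices of the
 * page frame number, with the bottom level pointing into phys_sections.
 */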

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
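
/* For example, mapping a single page at index 0x12345 walks from the top
 * level down, allocating intermediate nodes on demand (phys_map_node_alloc),
 * and finally stores the 16-bit section number in the level-0 entry with
 * skip == 0.  The "3 * P_L2_LEVELS" reservation above is a deliberately
 * generous bound on how many new nodes one such insertion can require.
 */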
230
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200231/* Compact a non leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
233 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
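
/* A small worked example: if a node at some level has exactly one non-NIL
 * child, and that child itself skips 2 levels, the parent entry is rewritten
 * to point directly at the grandchild with skip == 1 + 2 == 3, so lookups
 * consume three levels' worth of index bits while following a single pointer
 * (see phys_page_find below).  The (1 << 3) check above caps the accumulated
 * skip, so chains that would skip eight or more levels are left uncompacted.
 */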

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
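
/* Example of how skip shortens the walk above: starting with i == P_L2_LEVELS,
 * each iteration subtracts lp.skip before indexing, so a compacted entry with
 * skip == 3 consumes three levels of the index while following one pointer.
 * A leaf (skip == 0) ends the loop, and the final ptr selects the
 * MemoryRegionSection, subject to the range check against the section's
 * offset and size.
 */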

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
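
/* A minimal caller sketch (names illustrative only): translation must happen
 * inside an RCU critical section, and the result is only valid until the
 * matching rcu_read_unlock():
 *
 *     hwaddr xlat, len = size;
 *     rcu_read_lock();
 *     mr = address_space_translate(&address_space_memory, addr,
 *                                  &xlat, &len, false);
 *     // ... access at most 'len' bytes of 'mr' starting at 'xlat' ...
 *     rcu_read_unlock();
 */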

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};
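
/* Note on the subsection above: "cpu_common/exception_index" is only put on
 * the wire when cpu_common_exception_index_needed() returns true, i.e. when
 * TCG is in use and an exception is actually pending.  In the common case
 * nothing extra is sent, which keeps the migration stream compatible with
 * destinations that do not know about the subsection.
 */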

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
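
/* A worked example of the wraparound-safe comparison above (64-bit vaddr
 * assumed for the numbers): a watchpoint at 0xffffffffffffff00 with len 0x100
 * covers up to wpend == 0xffffffffffffffff.  An access at 0xffffffffffffff80
 * with len 0x80 gives addrend == 0xffffffffffffffff; comparing the last byte
 * of each range instead of "one past the end" avoids the overflow to 0 that
 * addr + len would produce, and the two comparisons correctly report overlap.
 */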

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
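
/* A worked example of the page arithmetic above, assuming 4 KiB target pages
 * (TARGET_PAGE_BITS == 12): start == 0x1800 and length == 0x2000 give
 * page == 1 and end == TARGET_PAGE_ALIGN(0x3800) >> 12 == 4, so pages 1..3
 * are tested and cleared in the dirty bitmap for the given client.
 */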

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
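
/* The returned iotlb value is an encoding rather than a plain address: for
 * RAM it is the page-aligned ram_addr of the page ORed with a small
 * PHYS_SECTION_* index (NOTDIRTY or ROM) in the low bits, while for MMIO it
 * is the index of the MemoryRegionSection in the dispatch map plus the xlat
 * offset.  This is why phys_section_add() asserts that the section count
 * stays below TARGET_PAGE_SIZE: the section number must fit in the low,
 * page-offset bits.
 */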
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

943/*
944 * Set a custom physical guest memory alloator.
945 * Accelerators with unusual needs may need this. Hopefully, we can
946 * get rid of it eventually.
947 */
Igor Mammedova2b257d2014-10-31 16:38:37 +0000948void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +0200949{
950 phys_mem_alloc = alloc;
951}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
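
/* A worked example of the splitting above, assuming 4 KiB target pages: a
 * section covering [0x1800, 0x4200) is registered in three steps - a subpage
 * for the unaligned head [0x1800, 0x2000), a multipage entry for the aligned
 * middle [0x2000, 0x4000), and another subpage for the unaligned tail
 * [0x4000, 0x4200).  Fully page-aligned sections skip the subpage steps.
 */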

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
1111
Alex Williamson04b16652010-07-02 11:13:17 -06001112static void *file_ram_alloc(RAMBlock *block,
1113 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001114 const char *path,
1115 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001116{
1117 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001118 char *sanitized_name;
1119 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001120 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001121 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001122 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001123 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001124
Hu Taofc7a5802014-09-09 13:28:01 +08001125 hpagesize = gethugepagesize(path, &local_err);
1126 if (local_err) {
1127 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001128 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001129 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001130 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001131
1132 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001133 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1134 "or larger than huge page size 0x%" PRIx64,
1135 memory, hpagesize);
1136 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001137 }
1138
1139 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001140 error_setg(errp,
1141 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001142 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001143 }
1144
Peter Feiner8ca761f2013-03-04 13:54:25 -05001145 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001146 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001147 for (c = sanitized_name; *c != '\0'; c++) {
1148 if (*c == '/')
1149 *c = '_';
1150 }
1151
1152 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1153 sanitized_name);
1154 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001155
1156 fd = mkstemp(filename);
1157 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001158 error_setg_errno(errp, errno,
1159 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001160 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001161 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001162 }
1163 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001164 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001165
1166 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1167
1168 /*
 1169 * ftruncate is not supported by hugetlbfs on older
 1170 * hosts, so don't bother bailing out on errors.
1171 * If anything goes wrong with it under other filesystems,
1172 * mmap will fail.
1173 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001174 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001175 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001176 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001177
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001178 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1179 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1180 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001181 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001182 error_setg_errno(errp, errno,
1183 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001184 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001185 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001186 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001187
1188 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001189 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001190 }
1191
Alex Williamson04b16652010-07-02 11:13:17 -06001192 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001194
1195error:
1196 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001197 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001198 exit(1);
1199 }
1200 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001201}
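/*
 * Sketch of how this allocator is typically exercised (illustrative; the
 * exact command line below is an assumption, not taken from this file):
 *
 *     mount -t hugetlbfs none /dev/hugepages
 *     qemu-system-x86_64 -m 4096 -mem-path /dev/hugepages -mem-prealloc ...
 *
 * gethugepagesize() then reports the hugetlbfs page size, guest RAM is
 * backed by an unlinked temporary file under /dev/hugepages, and
 * os_mem_prealloc() touches every page up front when -mem-prealloc is given.
 */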
1202#endif
1203
Mike Day0dc3f442013-09-05 14:41:35 -04001204/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001205static ram_addr_t find_ram_offset(ram_addr_t size)
1206{
Alex Williamson04b16652010-07-02 11:13:17 -06001207 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001208 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001209
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001210 assert(size != 0); /* it would hand out same offset multiple times */
1211
Mike Day0dc3f442013-09-05 14:41:35 -04001212 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001213 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001214 }
Alex Williamson04b16652010-07-02 11:13:17 -06001215
Mike Day0dc3f442013-09-05 14:41:35 -04001216 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001217 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001218
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001219 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001220
Mike Day0dc3f442013-09-05 14:41:35 -04001221 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001222 if (next_block->offset >= end) {
1223 next = MIN(next, next_block->offset);
1224 }
1225 }
1226 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001227 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001228 mingap = next - end;
1229 }
1230 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001231
1232 if (offset == RAM_ADDR_MAX) {
1233 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1234 (uint64_t)size);
1235 abort();
1236 }
1237
Alex Williamson04b16652010-07-02 11:13:17 -06001238 return offset;
1239}
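/*
 * Worked example (illustrative only): with existing blocks covering
 * [0x0, 0x8000) and [0x10000, 0x14000), find_ram_offset(0x4000) returns
 * 0x8000, because the 0x8000-byte hole between the two blocks is the
 * smallest gap that still fits; find_ram_offset(0xc000) returns 0x14000,
 * i.e. the space after the last block.
 */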
1240
Juan Quintela652d7ec2012-07-20 10:37:54 +02001241ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001242{
Alex Williamsond17b5282010-06-25 11:08:38 -06001243 RAMBlock *block;
1244 ram_addr_t last = 0;
1245
Mike Day0dc3f442013-09-05 14:41:35 -04001246 rcu_read_lock();
1247 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001248 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001249 }
Mike Day0dc3f442013-09-05 14:41:35 -04001250 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001251 return last;
1252}
1253
Jason Baronddb97f12012-08-02 15:44:16 -04001254static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1255{
1256 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001257
 1258 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001259 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001260 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1261 if (ret) {
1262 perror("qemu_madvise");
1263 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1264 "but dump_guest_core=off specified\n");
1265 }
1266 }
1267}
1268
Mike Day0dc3f442013-09-05 14:41:35 -04001269/* Called within an RCU critical section, or while the ramlist lock
1270 * is held.
1271 */
Hu Tao20cfe882014-04-02 15:13:26 +08001272static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001273{
Hu Tao20cfe882014-04-02 15:13:26 +08001274 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001275
Mike Day0dc3f442013-09-05 14:41:35 -04001276 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001277 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001278 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001279 }
1280 }
Hu Tao20cfe882014-04-02 15:13:26 +08001281
1282 return NULL;
1283}
1284
Mike Dayae3a7042013-09-05 14:41:35 -04001285/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001286void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1287{
Mike Dayae3a7042013-09-05 14:41:35 -04001288 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001289
Mike Day0dc3f442013-09-05 14:41:35 -04001290 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001291 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001292 assert(new_block);
1293 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001294
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001295 if (dev) {
1296 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001297 if (id) {
1298 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001299 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001300 }
1301 }
1302 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1303
Mike Day0dc3f442013-09-05 14:41:35 -04001304 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001305 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001306 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1307 new_block->idstr);
1308 abort();
1309 }
1310 }
Mike Day0dc3f442013-09-05 14:41:35 -04001311 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001312}
1313
Mike Dayae3a7042013-09-05 14:41:35 -04001314/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001315void qemu_ram_unset_idstr(ram_addr_t addr)
1316{
Mike Dayae3a7042013-09-05 14:41:35 -04001317 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001318
Mike Dayae3a7042013-09-05 14:41:35 -04001319 /* FIXME: arch_init.c assumes that this is not called throughout
1320 * migration. Ignore the problem since hot-unplug during migration
1321 * does not work anyway.
1322 */
1323
Mike Day0dc3f442013-09-05 14:41:35 -04001324 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001325 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001326 if (block) {
1327 memset(block->idstr, 0, sizeof(block->idstr));
1328 }
Mike Day0dc3f442013-09-05 14:41:35 -04001329 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001330}
1331
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001332static int memory_try_enable_merging(void *addr, size_t len)
1333{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001334 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001335 /* disabled by the user */
1336 return 0;
1337 }
1338
1339 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1340}
1341
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001342/* Only legal before guest might have detected the memory size: e.g. on
1343 * incoming migration, or right after reset.
1344 *
 1345 * As the memory core doesn't know how memory is accessed, it is up to the
 1346 * resize callback to update device state and/or add assertions to detect
1347 * misuse, if necessary.
1348 */
1349int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1350{
1351 RAMBlock *block = find_ram_block(base);
1352
1353 assert(block);
1354
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001355 newsize = TARGET_PAGE_ALIGN(newsize);
1356
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001357 if (block->used_length == newsize) {
1358 return 0;
1359 }
1360
1361 if (!(block->flags & RAM_RESIZEABLE)) {
1362 error_setg_errno(errp, EINVAL,
1363 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1364 " in != 0x" RAM_ADDR_FMT, block->idstr,
1365 newsize, block->used_length);
1366 return -EINVAL;
1367 }
1368
1369 if (block->max_length < newsize) {
1370 error_setg_errno(errp, EINVAL,
1371 "Length too large: %s: 0x" RAM_ADDR_FMT
1372 " > 0x" RAM_ADDR_FMT, block->idstr,
1373 newsize, block->max_length);
1374 return -EINVAL;
1375 }
1376
1377 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1378 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001379 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1380 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001381 memory_region_set_size(block->mr, newsize);
1382 if (block->resized) {
1383 block->resized(block->idstr, newsize, block->host);
1384 }
1385 return 0;
1386}
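/*
 * Usage sketch (illustrative only; the surrounding code and error handling
 * are assumptions, not taken from this file):
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, new_size, &err) < 0) {
 *         ... report err and keep the old size ...
 *     }
 *
 * The block must have been created with RAM_RESIZEABLE set (see
 * qemu_ram_alloc_resizeable() below) and new_size must not exceed its
 * max_length.
 */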
1387
Hu Taoef701d72014-09-09 13:27:54 +08001388static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001389{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001390 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001391 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001392 ram_addr_t old_ram_size, new_ram_size;
1393
1394 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001395
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001396 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001397 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001398
1399 if (!new_block->host) {
1400 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001401 xen_ram_alloc(new_block->offset, new_block->max_length,
1402 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001403 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001404 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001405 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001406 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001407 error_setg_errno(errp, errno,
1408 "cannot set up guest memory '%s'",
1409 memory_region_name(new_block->mr));
1410 qemu_mutex_unlock_ramlist();
1411 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001412 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001413 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001414 }
1415 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001416
Li Zhijiandd631692015-07-02 20:18:06 +08001417 new_ram_size = MAX(old_ram_size,
1418 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1419 if (new_ram_size > old_ram_size) {
1420 migration_bitmap_extend(old_ram_size, new_ram_size);
1421 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001422 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1423 * QLIST (which has an RCU-friendly variant) does not have insertion at
1424 * tail, so save the last element in last_block.
1425 */
Mike Day0dc3f442013-09-05 14:41:35 -04001426 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001427 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001428 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001429 break;
1430 }
1431 }
1432 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001433 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001434 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001435 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001436 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001437 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001438 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001439 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001440
Mike Day0dc3f442013-09-05 14:41:35 -04001441 /* Write list before version */
1442 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001443 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001444 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001445
Juan Quintela2152f5c2013-10-08 13:52:02 +02001446 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1447
1448 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001449 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001450
1451 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001452 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1453 ram_list.dirty_memory[i] =
1454 bitmap_zero_extend(ram_list.dirty_memory[i],
1455 old_ram_size, new_ram_size);
1456 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001457 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001458 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001459 new_block->used_length,
1460 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001461
Paolo Bonzinia904c912015-01-21 16:18:35 +01001462 if (new_block->host) {
1463 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1464 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1465 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1466 if (kvm_enabled()) {
1467 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1468 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001469 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001470
1471 return new_block->offset;
1472}
1473
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001474#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001475ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001476 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001477 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001478{
1479 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001480 ram_addr_t addr;
1481 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001482
1483 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001484 error_setg(errp, "-mem-path not supported with Xen");
1485 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001486 }
1487
1488 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1489 /*
1490 * file_ram_alloc() needs to allocate just like
1491 * phys_mem_alloc, but we haven't bothered to provide
1492 * a hook there.
1493 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001494 error_setg(errp,
1495 "-mem-path not supported with this accelerator");
1496 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001497 }
1498
1499 size = TARGET_PAGE_ALIGN(size);
1500 new_block = g_malloc0(sizeof(*new_block));
1501 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001502 new_block->used_length = size;
1503 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001504 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001505 new_block->host = file_ram_alloc(new_block, size,
1506 mem_path, errp);
1507 if (!new_block->host) {
1508 g_free(new_block);
1509 return -1;
1510 }
1511
Hu Taoef701d72014-09-09 13:27:54 +08001512 addr = ram_block_add(new_block, &local_err);
1513 if (local_err) {
1514 g_free(new_block);
1515 error_propagate(errp, local_err);
1516 return -1;
1517 }
1518 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001519}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001520#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001521
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001522static
1523ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1524 void (*resized)(const char*,
1525 uint64_t length,
1526 void *host),
1527 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001528 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001529{
1530 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001531 ram_addr_t addr;
1532 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001533
1534 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001535 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001536 new_block = g_malloc0(sizeof(*new_block));
1537 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001538 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001539 new_block->used_length = size;
1540 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001541 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001542 new_block->fd = -1;
1543 new_block->host = host;
1544 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001545 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001546 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001547 if (resizeable) {
1548 new_block->flags |= RAM_RESIZEABLE;
1549 }
Hu Taoef701d72014-09-09 13:27:54 +08001550 addr = ram_block_add(new_block, &local_err);
1551 if (local_err) {
1552 g_free(new_block);
1553 error_propagate(errp, local_err);
1554 return -1;
1555 }
1556 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001557}
1558
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001559ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1560 MemoryRegion *mr, Error **errp)
1561{
1562 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1563}
1564
Hu Taoef701d72014-09-09 13:27:54 +08001565ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001566{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001567 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1568}
1569
1570ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1571 void (*resized)(const char*,
1572 uint64_t length,
1573 void *host),
1574 MemoryRegion *mr, Error **errp)
1575{
1576 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001577}
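/*
 * Illustrative sketch (not taken from this file): callers normally reach
 * these allocators through the MemoryRegion layer, e.g.
 *
 *     memory_region_init_ram(mr, owner, "pc.ram", size, &error_abort);
 *     memory_region_init_resizeable_ram(mr, owner, "acpi-tables", used,
 *                                       max, resized_cb, &error_abort);
 *
 * which end up in qemu_ram_alloc() and qemu_ram_alloc_resizeable()
 * respectively; the region names and the resized_cb callback here are
 * made-up examples.
 */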
bellarde9a1ab12007-02-08 23:08:38 +00001578
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001579void qemu_ram_free_from_ptr(ram_addr_t addr)
1580{
1581 RAMBlock *block;
1582
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001583 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001584 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001585 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001586 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001587 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001588 /* Write list before version */
1589 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001590 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001591 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001592 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001593 }
1594 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001595 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001596}
1597
Paolo Bonzini43771532013-09-09 17:58:40 +02001598static void reclaim_ramblock(RAMBlock *block)
1599{
1600 if (block->flags & RAM_PREALLOC) {
1601 ;
1602 } else if (xen_enabled()) {
1603 xen_invalidate_map_cache_entry(block->host);
1604#ifndef _WIN32
1605 } else if (block->fd >= 0) {
1606 munmap(block->host, block->max_length);
1607 close(block->fd);
1608#endif
1609 } else {
1610 qemu_anon_ram_free(block->host, block->max_length);
1611 }
1612 g_free(block);
1613}
1614
Anthony Liguoric227f092009-10-01 16:12:16 -05001615void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001616{
Alex Williamson04b16652010-07-02 11:13:17 -06001617 RAMBlock *block;
1618
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001619 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001620 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001621 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001622 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001623 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001624 /* Write list before version */
1625 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001626 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001627 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001628 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001629 }
1630 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001631 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001632}
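/*
 * Illustrative note (not part of the original code): readers walk
 * ram_list.blocks under rcu_read_lock(), so the block is not freed here;
 * call_rcu() defers reclaim_ramblock() until concurrent readers such as
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         ... may still dereference a block being removed ...
 *     }
 *     rcu_read_unlock();
 *
 * have left their critical sections.
 */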
1633
Huang Yingcd19cfa2011-03-02 08:56:19 +01001634#ifndef _WIN32
1635void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1636{
1637 RAMBlock *block;
1638 ram_addr_t offset;
1639 int flags;
1640 void *area, *vaddr;
1641
Mike Day0dc3f442013-09-05 14:41:35 -04001642 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001643 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001644 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001645 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001646 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001647 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001648 } else if (xen_enabled()) {
1649 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001650 } else {
1651 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001652 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001653 flags |= (block->flags & RAM_SHARED ?
1654 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001655 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1656 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001657 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001658 /*
1659 * Remap needs to match alloc. Accelerators that
1660 * set phys_mem_alloc never remap. If they did,
1661 * we'd need a remap hook here.
1662 */
1663 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1664
Huang Yingcd19cfa2011-03-02 08:56:19 +01001665 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1666 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1667 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001668 }
1669 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001670 fprintf(stderr, "Could not remap addr: "
1671 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001672 length, addr);
1673 exit(1);
1674 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001675 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001676 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001677 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001678 }
1679 }
1680}
1681#endif /* !_WIN32 */
1682
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001683int qemu_get_ram_fd(ram_addr_t addr)
1684{
Mike Dayae3a7042013-09-05 14:41:35 -04001685 RAMBlock *block;
1686 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001687
Mike Day0dc3f442013-09-05 14:41:35 -04001688 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001689 block = qemu_get_ram_block(addr);
1690 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001691 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001692 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001693}
1694
Damjan Marion3fd74b82014-06-26 23:01:32 +02001695void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1696{
Mike Dayae3a7042013-09-05 14:41:35 -04001697 RAMBlock *block;
1698 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001699
Mike Day0dc3f442013-09-05 14:41:35 -04001700 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001701 block = qemu_get_ram_block(addr);
1702 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001703 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001704 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001705}
1706
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001707/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001708 * This should not be used for general purpose DMA. Use address_space_map
1709 * or address_space_rw instead. For local memory (e.g. video ram) that the
1710 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001711 *
1712 * By the time this function returns, the returned pointer is not protected
1713 * by RCU anymore. If the caller is not within an RCU critical section and
1714 * does not hold the iothread lock, it must have other means of protecting the
1715 * pointer, such as a reference to the region that includes the incoming
1716 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001717 */
1718void *qemu_get_ram_ptr(ram_addr_t addr)
1719{
Mike Dayae3a7042013-09-05 14:41:35 -04001720 RAMBlock *block;
1721 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001722
Mike Day0dc3f442013-09-05 14:41:35 -04001723 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001724 block = qemu_get_ram_block(addr);
1725
1726 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001727 /* We need to check whether the requested address is in RAM
 1728 * because we don't want to map the entire guest memory in QEMU.
 1729 * In that case, just map up to the end of the requested page.
1730 */
1731 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001732 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001733 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001734 }
Mike Dayae3a7042013-09-05 14:41:35 -04001735
1736 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001737 }
Mike Dayae3a7042013-09-05 14:41:35 -04001738 ptr = ramblock_ptr(block, addr - block->offset);
1739
Mike Day0dc3f442013-09-05 14:41:35 -04001740unlock:
1741 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001742 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001743}
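/*
 * Illustrative sketch of the locking rule described above (not part of the
 * original code): a caller that holds neither the iothread lock nor a
 * reference to the covering MemoryRegion should stay inside an RCU critical
 * section while it uses the pointer, e.g.
 *
 *     rcu_read_lock();
 *     ptr = qemu_get_ram_ptr(addr);
 *     memcpy(buf, ptr, len);
 *     rcu_read_unlock();
 *
 * so that the RAMBlock cannot be reclaimed while the copy is in progress.
 */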
1744
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001745/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001746 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001747 *
1748 * By the time this function returns, the returned pointer is not protected
1749 * by RCU anymore. If the caller is not within an RCU critical section and
1750 * does not hold the iothread lock, it must have other means of protecting the
1751 * pointer, such as a reference to the region that includes the incoming
1752 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001753 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001754static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001755{
Mike Dayae3a7042013-09-05 14:41:35 -04001756 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001757 if (*size == 0) {
1758 return NULL;
1759 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001760 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001761 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001762 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001763 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001764 rcu_read_lock();
1765 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001766 if (addr - block->offset < block->max_length) {
1767 if (addr - block->offset + *size > block->max_length)
1768 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001769 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001770 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001771 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001772 }
1773 }
1774
1775 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1776 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001777 }
1778}
1779
Paolo Bonzini7443b432013-06-03 12:44:02 +02001780/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001781 * (typically a TLB entry) back to a ram offset.
1782 *
1783 * By the time this function returns, the returned pointer is not protected
1784 * by RCU anymore. If the caller is not within an RCU critical section and
1785 * does not hold the iothread lock, it must have other means of protecting the
1786 * pointer, such as a reference to the region that includes the incoming
1787 * ram_addr_t.
1788 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001789MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001790{
pbrook94a6b542009-04-11 17:15:54 +00001791 RAMBlock *block;
1792 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001793 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001794
Jan Kiszka868bb332011-06-21 22:59:09 +02001795 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001796 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001797 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001798 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001799 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001800 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001801 }
1802
Mike Day0dc3f442013-09-05 14:41:35 -04001803 rcu_read_lock();
1804 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001805 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001806 goto found;
1807 }
1808
Mike Day0dc3f442013-09-05 14:41:35 -04001809 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001810 /* This case happens when the block is not mapped. */
1811 if (block->host == NULL) {
1812 continue;
1813 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001814 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001815 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001816 }
pbrook94a6b542009-04-11 17:15:54 +00001817 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001818
Mike Day0dc3f442013-09-05 14:41:35 -04001819 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001820 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001821
1822found:
1823 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001824 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001825 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001826 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001827}
Alex Williamsonf471a172010-06-11 11:11:42 -06001828
Avi Kivitya8170e52012-10-23 12:30:10 +02001829static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001830 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001831{
Juan Quintela52159192013-10-08 12:44:04 +02001832 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001833 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001834 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001835 switch (size) {
1836 case 1:
1837 stb_p(qemu_get_ram_ptr(ram_addr), val);
1838 break;
1839 case 2:
1840 stw_p(qemu_get_ram_ptr(ram_addr), val);
1841 break;
1842 case 4:
1843 stl_p(qemu_get_ram_ptr(ram_addr), val);
1844 break;
1845 default:
1846 abort();
1847 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001848 /* Set both VGA and migration bits for simplicity and to remove
1849 * the notdirty callback faster.
1850 */
1851 cpu_physical_memory_set_dirty_range(ram_addr, size,
1852 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001853 /* we remove the notdirty callback only if the code has been
1854 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001855 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001856 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001857 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001858 }
bellard1ccde1c2004-02-06 19:46:14 +00001859}
1860
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001861static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1862 unsigned size, bool is_write)
1863{
1864 return is_write;
1865}
1866
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001867static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001868 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001869 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001870 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001871};
1872
pbrook0f459d12008-06-09 00:20:13 +00001873/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001874static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001875{
Andreas Färber93afead2013-08-26 03:41:01 +02001876 CPUState *cpu = current_cpu;
1877 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001878 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001879 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001880 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001881 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001882
Andreas Färberff4700b2013-08-26 18:23:18 +02001883 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001884 /* We re-entered the check after replacing the TB. Now raise
 1885 * the debug interrupt so that it will trigger after the
1886 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001887 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001888 return;
1889 }
Andreas Färber93afead2013-08-26 03:41:01 +02001890 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001891 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001892 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1893 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001894 if (flags == BP_MEM_READ) {
1895 wp->flags |= BP_WATCHPOINT_HIT_READ;
1896 } else {
1897 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1898 }
1899 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001900 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001901 if (!cpu->watchpoint_hit) {
1902 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001903 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001904 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001905 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001906 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001907 } else {
1908 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001909 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001910 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001911 }
aliguori06d55cc2008-11-18 20:24:06 +00001912 }
aliguori6e140f22008-11-18 20:37:55 +00001913 } else {
1914 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001915 }
1916 }
1917}
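/*
 * Sketch (illustrative): the watchpoints scanned above are installed with
 * cpu_watchpoint_insert(), e.g. from the gdbstub:
 *
 *     cpu_watchpoint_insert(cpu, addr, len, BP_MEM_WRITE | BP_GDB, NULL);
 *
 * Installing a watchpoint forces the affected page through the slow path,
 * so every access lands in watch_mem_read()/watch_mem_write() below, which
 * call check_watchpoint() before forwarding the access.
 */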
1918
pbrook6658ffb2007-03-16 23:58:11 +00001919/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
 1920 so these check for a hit, then pass through to the normal out-of-line
1921 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001922static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1923 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001924{
Peter Maydell66b9b432015-04-26 16:49:24 +01001925 MemTxResult res;
1926 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001927
Peter Maydell66b9b432015-04-26 16:49:24 +01001928 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001929 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001930 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001931 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001932 break;
1933 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001934 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001935 break;
1936 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001937 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001938 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001939 default: abort();
1940 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001941 *pdata = data;
1942 return res;
1943}
1944
1945static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1946 uint64_t val, unsigned size,
1947 MemTxAttrs attrs)
1948{
1949 MemTxResult res;
1950
1951 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1952 switch (size) {
1953 case 1:
1954 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1955 break;
1956 case 2:
1957 address_space_stw(&address_space_memory, addr, val, attrs, &res);
1958 break;
1959 case 4:
1960 address_space_stl(&address_space_memory, addr, val, attrs, &res);
1961 break;
1962 default: abort();
1963 }
1964 return res;
pbrook6658ffb2007-03-16 23:58:11 +00001965}
1966
Avi Kivity1ec9b902012-01-02 12:47:48 +02001967static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01001968 .read_with_attrs = watch_mem_read,
1969 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001970 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001971};
pbrook6658ffb2007-03-16 23:58:11 +00001972
Peter Maydellf25a49e2015-04-26 16:49:24 +01001973static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1974 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001975{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001976 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001977 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01001978 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001979
blueswir1db7b5422007-05-26 17:36:03 +00001980#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001981 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001982 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001983#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01001984 res = address_space_read(subpage->as, addr + subpage->base,
1985 attrs, buf, len);
1986 if (res) {
1987 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01001988 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001989 switch (len) {
1990 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001991 *data = ldub_p(buf);
1992 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001993 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001994 *data = lduw_p(buf);
1995 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001996 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001997 *data = ldl_p(buf);
1998 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001999 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002000 *data = ldq_p(buf);
2001 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002002 default:
2003 abort();
2004 }
blueswir1db7b5422007-05-26 17:36:03 +00002005}
2006
Peter Maydellf25a49e2015-04-26 16:49:24 +01002007static MemTxResult subpage_write(void *opaque, hwaddr addr,
2008 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002009{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002010 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002011 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002012
blueswir1db7b5422007-05-26 17:36:03 +00002013#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002014 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002015 " value %"PRIx64"\n",
2016 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002017#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002018 switch (len) {
2019 case 1:
2020 stb_p(buf, value);
2021 break;
2022 case 2:
2023 stw_p(buf, value);
2024 break;
2025 case 4:
2026 stl_p(buf, value);
2027 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002028 case 8:
2029 stq_p(buf, value);
2030 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002031 default:
2032 abort();
2033 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002034 return address_space_write(subpage->as, addr + subpage->base,
2035 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002036}
2037
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002038static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002039 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002040{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002041 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002042#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002043 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002044 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002045#endif
2046
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002047 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002048 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002049}
2050
Avi Kivity70c68e42012-01-02 12:32:48 +02002051static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002052 .read_with_attrs = subpage_read,
2053 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002054 .impl.min_access_size = 1,
2055 .impl.max_access_size = 8,
2056 .valid.min_access_size = 1,
2057 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002058 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002059 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002060};
2061
Anthony Liguoric227f092009-10-01 16:12:16 -05002062static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002063 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002064{
2065 int idx, eidx;
2066
2067 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2068 return -1;
2069 idx = SUBPAGE_IDX(start);
2070 eidx = SUBPAGE_IDX(end);
2071#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002072 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2073 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002074#endif
blueswir1db7b5422007-05-26 17:36:03 +00002075 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002076 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002077 }
2078
2079 return 0;
2080}
2081
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002082static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002083{
Anthony Liguoric227f092009-10-01 16:12:16 -05002084 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002085
Anthony Liguori7267c092011-08-20 22:09:37 -05002086 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002087
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002088 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002089 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002090 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002091 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002092 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002093#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002094 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2095 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002096#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002097 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002098
2099 return mmio;
2100}
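/*
 * Worked example (illustrative only): a 0x100-byte MMIO region placed at
 * guest address 0x10000040 cannot be described by a whole-page section with
 * 4 KiB pages. mem_add() therefore routes it through register_subpage(),
 * and the subpage_t for the page at 0x10000000 ends up with
 * sub_section[0x040..0x13f] pointing at the MMIO section while the rest of
 * the page keeps whatever other section (or PHYS_SECTION_UNASSIGNED)
 * occupies it.
 */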
2101
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002102static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2103 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002104{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002105 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002106 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002107 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002108 .mr = mr,
2109 .offset_within_address_space = 0,
2110 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002111 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002112 };
2113
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002114 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002115}
2116
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002117MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002118{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002119 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2120 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002121
2122 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002123}
2124
Avi Kivitye9179ce2009-06-14 11:38:52 +03002125static void io_mem_init(void)
2126{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002127 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002128 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002129 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002130 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002131 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002132 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002133 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002134}
2135
Avi Kivityac1970f2012-10-03 16:22:53 +02002136static void mem_begin(MemoryListener *listener)
2137{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002138 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002139 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2140 uint16_t n;
2141
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002142 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002143 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002144 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002145 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002146 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002147 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002148 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002149 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002150
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002151 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002152 d->as = as;
2153 as->next_dispatch = d;
2154}
2155
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002156static void address_space_dispatch_free(AddressSpaceDispatch *d)
2157{
2158 phys_sections_free(&d->map);
2159 g_free(d);
2160}
2161
Paolo Bonzini00752702013-05-29 12:13:54 +02002162static void mem_commit(MemoryListener *listener)
2163{
2164 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002165 AddressSpaceDispatch *cur = as->dispatch;
2166 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002167
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002168 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002169
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002170 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002171 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002172 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002173 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002174}
2175
Avi Kivity1d711482012-10-02 18:54:45 +02002176static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002177{
Andreas Färber182735e2013-05-29 22:29:20 +02002178 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002179
2180 /* since each CPU stores ram addresses in its TLB cache, we must
2181 reset the modified entries */
2182 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002183 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002184 /* FIXME: Disentangle the cpu.h circular file deps so we can
 2185 directly get the right CPU from the listener. */
2186 if (cpu->tcg_as_listener != listener) {
2187 continue;
2188 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002189 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002190 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002191}
2192
Avi Kivityac1970f2012-10-03 16:22:53 +02002193void address_space_init_dispatch(AddressSpace *as)
2194{
Paolo Bonzini00752702013-05-29 12:13:54 +02002195 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002196 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002197 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002198 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002199 .region_add = mem_add,
2200 .region_nop = mem_add,
2201 .priority = 0,
2202 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002203 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002204}
2205
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002206void address_space_unregister(AddressSpace *as)
2207{
2208 memory_listener_unregister(&as->dispatch_listener);
2209}
2210
Avi Kivity83f3c252012-10-07 12:59:55 +02002211void address_space_destroy_dispatch(AddressSpace *as)
2212{
2213 AddressSpaceDispatch *d = as->dispatch;
2214
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002215 atomic_rcu_set(&as->dispatch, NULL);
2216 if (d) {
2217 call_rcu(d, address_space_dispatch_free, rcu);
2218 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002219}
2220
Avi Kivity62152b82011-07-26 14:26:14 +03002221static void memory_map_init(void)
2222{
Anthony Liguori7267c092011-08-20 22:09:37 -05002223 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002224
Paolo Bonzini57271d62013-11-07 17:14:37 +01002225 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002226 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002227
Anthony Liguori7267c092011-08-20 22:09:37 -05002228 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002229 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2230 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002231 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002232}
2233
2234MemoryRegion *get_system_memory(void)
2235{
2236 return system_memory;
2237}
2238
Avi Kivity309cb472011-08-08 16:09:03 +03002239MemoryRegion *get_system_io(void)
2240{
2241 return system_io;
2242}
2243
pbrooke2eef172008-06-08 01:09:01 +00002244#endif /* !defined(CONFIG_USER_ONLY) */
2245
bellard13eb76e2004-01-24 15:23:36 +00002246/* physical memory access (slow version, mainly for debug) */
2247#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002248int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002249 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002250{
2251 int l, flags;
2252 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002253 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002254
2255 while (len > 0) {
2256 page = addr & TARGET_PAGE_MASK;
2257 l = (page + TARGET_PAGE_SIZE) - addr;
2258 if (l > len)
2259 l = len;
2260 flags = page_get_flags(page);
2261 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002262 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002263 if (is_write) {
2264 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002265 return -1;
bellard579a97f2007-11-11 14:26:47 +00002266 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002267 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002268 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002269 memcpy(p, buf, l);
2270 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002271 } else {
2272 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002273 return -1;
bellard579a97f2007-11-11 14:26:47 +00002274 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002275 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002276 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002277 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002278 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002279 }
2280 len -= l;
2281 buf += l;
2282 addr += l;
2283 }
Paul Brooka68fe892010-03-01 00:08:59 +00002284 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002285}
bellard8df1cd02005-01-28 22:37:22 +00002286
bellard13eb76e2004-01-24 15:23:36 +00002287#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002288
Paolo Bonzini845b6212015-03-23 11:45:53 +01002289static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002290 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002291{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002292 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2293 /* No early return if dirty_log_mask is or becomes 0, because
2294 * cpu_physical_memory_set_dirty_range will still call
2295 * xen_modified_memory.
2296 */
2297 if (dirty_log_mask) {
2298 dirty_log_mask =
2299 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002300 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002301 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2302 tb_invalidate_phys_range(addr, addr + length);
2303 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2304 }
2305 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002306}
2307
Richard Henderson23326162013-07-08 14:55:59 -07002308static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002309{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002310 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002311
2312 /* Regions are assumed to support 1-4 byte accesses unless
2313 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002314 if (access_size_max == 0) {
2315 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002316 }
Richard Henderson23326162013-07-08 14:55:59 -07002317
2318 /* Bound the maximum access by the alignment of the address. */
2319 if (!mr->ops->impl.unaligned) {
2320 unsigned align_size_max = addr & -addr;
2321 if (align_size_max != 0 && align_size_max < access_size_max) {
2322 access_size_max = align_size_max;
2323 }
2324 }
2325
2326 /* Don't attempt accesses larger than the maximum. */
2327 if (l > access_size_max) {
2328 l = access_size_max;
2329 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02002330 if (l & (l - 1)) {
2331 l = 1 << (qemu_fls(l) - 1);
2332 }
Richard Henderson23326162013-07-08 14:55:59 -07002333
2334 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002335}
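/* Worked example (illustrative figures, not taken from a real region): with
 * the defaults above (max_access_size treated as 4, no unaligned support),
 * an 8-byte request at addr == 0x1002 is reduced to a 2-byte access:
 * access_size_max starts at 4, addr & -addr == 0x2 lowers it to 2, and l is
 * clamped to that, so address_space_rw() below simply loops with smaller
 * accesses.  A 6-byte request against a region that does allow 8-byte
 * accesses at an 8-aligned address is rounded down to 4 by the final
 * power-of-two step. */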
2336
Jan Kiszka4840f102015-06-18 18:47:22 +02002337static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002338{
Jan Kiszka4840f102015-06-18 18:47:22 +02002339 bool unlocked = !qemu_mutex_iothread_locked();
2340 bool release_lock = false;
2341
2342 if (unlocked && mr->global_locking) {
2343 qemu_mutex_lock_iothread();
2344 unlocked = false;
2345 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002346 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002347 if (mr->flush_coalesced_mmio) {
2348 if (unlocked) {
2349 qemu_mutex_lock_iothread();
2350 }
2351 qemu_flush_coalesced_mmio_buffer();
2352 if (unlocked) {
2353 qemu_mutex_unlock_iothread();
2354 }
2355 }
2356
2357 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002358}
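/* Callers OR the result into a local "release_lock" flag and call
 * qemu_mutex_unlock_iothread() once the MMIO access has been dispatched,
 * as address_space_rw() does at the end of each loop iteration below. */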
2359
Peter Maydell5c9eb022015-04-26 16:49:24 +01002360MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2361 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002362{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002363 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002364 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002365 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002366 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002367 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002368 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002369 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002370
Paolo Bonzini41063e12015-03-18 14:21:43 +01002371 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002372 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002373 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002374 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002375
bellard13eb76e2004-01-24 15:23:36 +00002376 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002377 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002378 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002379 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002380 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002381 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002382 switch (l) {
2383 case 8:
2384 /* 64 bit write access */
2385 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002386 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2387 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002388 break;
2389 case 4:
bellard1c213d12005-09-03 10:49:04 +00002390 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002391 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002392 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2393 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002394 break;
2395 case 2:
bellard1c213d12005-09-03 10:49:04 +00002396 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002397 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002398 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2399 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002400 break;
2401 case 1:
bellard1c213d12005-09-03 10:49:04 +00002402 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002403 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002404 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2405 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002406 break;
2407 default:
2408 abort();
bellard13eb76e2004-01-24 15:23:36 +00002409 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002410 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002411 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002412 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002413 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002414 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002415 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002416 }
2417 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002418 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002419 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002420 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002421 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002422 switch (l) {
2423 case 8:
2424 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002425 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2426 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002427 stq_p(buf, val);
2428 break;
2429 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002430 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002431 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2432 attrs);
bellardc27004e2005-01-03 23:35:10 +00002433 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002434 break;
2435 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002436 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002437 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2438 attrs);
bellardc27004e2005-01-03 23:35:10 +00002439 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002440 break;
2441 case 1:
bellard1c213d12005-09-03 10:49:04 +00002442 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002443 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2444 attrs);
bellardc27004e2005-01-03 23:35:10 +00002445 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002446 break;
2447 default:
2448 abort();
bellard13eb76e2004-01-24 15:23:36 +00002449 }
2450 } else {
2451 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002452 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002453 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002454 }
2455 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002456
2457 if (release_lock) {
2458 qemu_mutex_unlock_iothread();
2459 release_lock = false;
2460 }
2461
bellard13eb76e2004-01-24 15:23:36 +00002462 len -= l;
2463 buf += l;
2464 addr += l;
2465 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002466 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002467
Peter Maydell3b643492015-04-26 16:49:23 +01002468 return result;
bellard13eb76e2004-01-24 15:23:36 +00002469}
bellard8df1cd02005-01-28 22:37:22 +00002470
Peter Maydell5c9eb022015-04-26 16:49:24 +01002471MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2472 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002473{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002474 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002475}
2476
Peter Maydell5c9eb022015-04-26 16:49:24 +01002477MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2478 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002479{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002480 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002481}
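/* A minimal usage sketch, assuming a caller that wants explicit error
 * reporting (the variable names are illustrative only):
 *
 *     uint8_t buf[4];
 *     MemTxResult r = address_space_read(&address_space_memory, paddr,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        buf, sizeof(buf));
 *     if (r != MEMTX_OK) {
 *         ... report the failed transaction to the guest ...
 *     }
 *
 * Callers that do not care about the result can keep using the legacy
 * cpu_physical_memory_rw() wrapper below. */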
2482
2483
Avi Kivitya8170e52012-10-23 12:30:10 +02002484void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002485 int len, int is_write)
2486{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002487 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2488 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002489}
2490
Alexander Graf582b55a2013-12-11 14:17:44 +01002491enum write_rom_type {
2492 WRITE_DATA,
2493 FLUSH_CACHE,
2494};
2495
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002496static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002497 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002498{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002499 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002500 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002501 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002502 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002503
Paolo Bonzini41063e12015-03-18 14:21:43 +01002504 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002505 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002506 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002507 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002508
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002509 if (!(memory_region_is_ram(mr) ||
2510 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002511 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002512 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002513 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002514 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002515 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002516 switch (type) {
2517 case WRITE_DATA:
2518 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002519 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002520 break;
2521 case FLUSH_CACHE:
2522 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2523 break;
2524 }
bellardd0ecd2a2006-04-23 17:14:48 +00002525 }
2526 len -= l;
2527 buf += l;
2528 addr += l;
2529 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002530 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002531}
2532
Alexander Graf582b55a2013-12-11 14:17:44 +01002533/* used for ROM loading: can write to RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002534void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002535 const uint8_t *buf, int len)
2536{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002537 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002538}
2539
2540void cpu_flush_icache_range(hwaddr start, int len)
2541{
2542 /*
2543 * This function should do the same thing as an icache flush that was
2544 * triggered from within the guest. For TCG we are always cache coherent,
2545 * so there is no need to flush anything. For KVM / Xen we need to flush
2546 * the host's instruction cache at least.
2547 */
2548 if (tcg_enabled()) {
2549 return;
2550 }
2551
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002552 cpu_physical_memory_write_rom_internal(&address_space_memory,
2553 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002554}
2555
aliguori6d16c2f2009-01-22 16:59:11 +00002556typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002557 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002558 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002559 hwaddr addr;
2560 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002561 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002562} BounceBuffer;
2563
2564static BounceBuffer bounce;
2565
aliguoriba223c22009-01-22 16:59:16 +00002566typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002567 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002568 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002569} MapClient;
2570
Fam Zheng38e047b2015-03-16 17:03:35 +08002571QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002572static QLIST_HEAD(map_client_list, MapClient) map_client_list
2573 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002574
Fam Zhenge95205e2015-03-16 17:03:37 +08002575static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002576{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002577 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002578 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002579}
2580
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002581static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002582{
2583 MapClient *client;
2584
Blue Swirl72cf2d42009-09-12 07:36:22 +00002585 while (!QLIST_EMPTY(&map_client_list)) {
2586 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002587 qemu_bh_schedule(client->bh);
2588 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002589 }
2590}
2591
Fam Zhenge95205e2015-03-16 17:03:37 +08002592void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002593{
2594 MapClient *client = g_malloc(sizeof(*client));
2595
Fam Zheng38e047b2015-03-16 17:03:35 +08002596 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002597 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002598 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002599 if (!atomic_read(&bounce.in_use)) {
2600 cpu_notify_map_clients_locked();
2601 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002602 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002603}
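/* A minimal sketch of the intended retry pattern, assuming a hypothetical
 * device model (none of the names below exist in this file): when
 * address_space_map() returns NULL because the single bounce buffer is in
 * use, the caller registers a bottom half and retries from it.
 *
 *     static void mydev_dma_retry(void *opaque)
 *     {
 *         MyDevState *s = opaque;
 *         hwaddr plen = s->len;
 *         void *p = address_space_map(s->as, s->addr, &plen, false);
 *
 *         if (!p) {
 *             cpu_register_map_client(s->retry_bh);
 *             return;
 *         }
 *         ... consume up to plen bytes, then ...
 *         address_space_unmap(s->as, p, plen, false, plen);
 *     }
 *
 * The bottom half is scheduled by cpu_notify_map_clients_locked() above as
 * soon as the bounce buffer is released. */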
2604
Fam Zheng38e047b2015-03-16 17:03:35 +08002605void cpu_exec_init_all(void)
2606{
2607 qemu_mutex_init(&ram_list.mutex);
2608 memory_map_init();
2609 io_mem_init();
2610 qemu_mutex_init(&map_client_list_lock);
2611}
2612
Fam Zhenge95205e2015-03-16 17:03:37 +08002613void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002614{
Fam Zhenge95205e2015-03-16 17:03:37 +08002615 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002616
Fam Zhenge95205e2015-03-16 17:03:37 +08002617 qemu_mutex_lock(&map_client_list_lock);
2618 QLIST_FOREACH(client, &map_client_list, link) {
2619 if (client->bh == bh) {
2620 cpu_unregister_map_client_do(client);
2621 break;
2622 }
2623 }
2624 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002625}
2626
2627static void cpu_notify_map_clients(void)
2628{
Fam Zheng38e047b2015-03-16 17:03:35 +08002629 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002630 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002631 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002632}
2633
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002634bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2635{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002636 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002637 hwaddr l, xlat;
2638
Paolo Bonzini41063e12015-03-18 14:21:43 +01002639 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002640 while (len > 0) {
2641 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002642 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2643 if (!memory_access_is_direct(mr, is_write)) {
2644 l = memory_access_size(mr, l, addr);
2645 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002646                return false;
2647 }
2648 }
2649
2650 len -= l;
2651 addr += l;
2652 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002653 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002654 return true;
2655}
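/* Typical use (illustrative): a DMA engine can call
 * address_space_access_valid(as, addr, len, true) before starting a
 * transfer and report an error to the guest up front, instead of finding
 * out half way through that part of the range is not backed by RAM or a
 * valid MMIO region. */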
2656
aliguori6d16c2f2009-01-22 16:59:11 +00002657/* Map a physical memory region into a host virtual address.
2658 * May map a subset of the requested range, given by and returned in *plen.
2659 * May return NULL if resources needed to perform the mapping are exhausted.
2660 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002661 * Use cpu_register_map_client() to know when retrying the map operation is
2662 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002663 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002664void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002665 hwaddr addr,
2666 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002667 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002668{
Avi Kivitya8170e52012-10-23 12:30:10 +02002669 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002670 hwaddr done = 0;
2671 hwaddr l, xlat, base;
2672 MemoryRegion *mr, *this_mr;
2673 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002674
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002675 if (len == 0) {
2676 return NULL;
2677 }
aliguori6d16c2f2009-01-22 16:59:11 +00002678
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002679 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002680 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002681 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002682
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002683 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002684 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002685 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002686 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002687 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002688 /* Avoid unbounded allocations */
2689 l = MIN(l, TARGET_PAGE_SIZE);
2690 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002691 bounce.addr = addr;
2692 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002693
2694 memory_region_ref(mr);
2695 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002696 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002697 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2698 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002699 }
aliguori6d16c2f2009-01-22 16:59:11 +00002700
Paolo Bonzini41063e12015-03-18 14:21:43 +01002701 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002702 *plen = l;
2703 return bounce.buffer;
2704 }
2705
2706 base = xlat;
2707 raddr = memory_region_get_ram_addr(mr);
2708
2709 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002710 len -= l;
2711 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002712 done += l;
2713 if (len == 0) {
2714 break;
2715 }
2716
2717 l = len;
2718 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2719 if (this_mr != mr || xlat != base + done) {
2720 break;
2721 }
aliguori6d16c2f2009-01-22 16:59:11 +00002722 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002723
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002724 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002725 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002726 *plen = done;
2727 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002728}
2729
Avi Kivityac1970f2012-10-03 16:22:53 +02002730/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002731 * Will also mark the memory as dirty if is_write == 1. access_len gives
2732 * the amount of memory that was actually read or written by the caller.
2733 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002734void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2735 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002736{
2737 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002738 MemoryRegion *mr;
2739 ram_addr_t addr1;
2740
2741 mr = qemu_ram_addr_from_host(buffer, &addr1);
2742 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002743 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002744 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002745 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002746 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002747 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002748 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002749 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002750 return;
2751 }
2752 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002753 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2754 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002755 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002756 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002757 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002758 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002759 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002760 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002761}
bellardd0ecd2a2006-04-23 17:14:48 +00002762
Avi Kivitya8170e52012-10-23 12:30:10 +02002763void *cpu_physical_memory_map(hwaddr addr,
2764 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002765 int is_write)
2766{
2767 return address_space_map(&address_space_memory, addr, plen, is_write);
2768}
2769
Avi Kivitya8170e52012-10-23 12:30:10 +02002770void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2771 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002772{
2773 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2774}
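/* A minimal sketch of the map/unmap pairing, assuming a caller doing a
 * zero-copy read of guest memory (the names are illustrative only):
 *
 *     hwaddr plen = size;
 *     void *p = cpu_physical_memory_map(paddr, &plen, 0);
 *     if (p) {
 *         ... read at most plen bytes from p ...
 *         cpu_physical_memory_unmap(p, plen, 0, plen);
 *     }
 *
 * plen may come back smaller than requested, and when the bounce buffer was
 * used the data is only copied back to guest memory (for is_write == 1) by
 * the unmap call. */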
2775
bellard8df1cd02005-01-28 22:37:22 +00002776/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002777static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2778 MemTxAttrs attrs,
2779 MemTxResult *result,
2780 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002781{
bellard8df1cd02005-01-28 22:37:22 +00002782 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002783 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002784 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002785 hwaddr l = 4;
2786 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002787 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002788 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002789
Paolo Bonzini41063e12015-03-18 14:21:43 +01002790 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002791 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002792 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002793 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002794
bellard8df1cd02005-01-28 22:37:22 +00002795 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002796 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002797#if defined(TARGET_WORDS_BIGENDIAN)
2798 if (endian == DEVICE_LITTLE_ENDIAN) {
2799 val = bswap32(val);
2800 }
2801#else
2802 if (endian == DEVICE_BIG_ENDIAN) {
2803 val = bswap32(val);
2804 }
2805#endif
bellard8df1cd02005-01-28 22:37:22 +00002806 } else {
2807 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002808 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002809 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002810 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002811 switch (endian) {
2812 case DEVICE_LITTLE_ENDIAN:
2813 val = ldl_le_p(ptr);
2814 break;
2815 case DEVICE_BIG_ENDIAN:
2816 val = ldl_be_p(ptr);
2817 break;
2818 default:
2819 val = ldl_p(ptr);
2820 break;
2821 }
Peter Maydell50013112015-04-26 16:49:24 +01002822 r = MEMTX_OK;
2823 }
2824 if (result) {
2825 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002826 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002827 if (release_lock) {
2828 qemu_mutex_unlock_iothread();
2829 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002830 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002831 return val;
2832}
2833
Peter Maydell50013112015-04-26 16:49:24 +01002834uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2835 MemTxAttrs attrs, MemTxResult *result)
2836{
2837 return address_space_ldl_internal(as, addr, attrs, result,
2838 DEVICE_NATIVE_ENDIAN);
2839}
2840
2841uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2842 MemTxAttrs attrs, MemTxResult *result)
2843{
2844 return address_space_ldl_internal(as, addr, attrs, result,
2845 DEVICE_LITTLE_ENDIAN);
2846}
2847
2848uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2849 MemTxAttrs attrs, MemTxResult *result)
2850{
2851 return address_space_ldl_internal(as, addr, attrs, result,
2852 DEVICE_BIG_ENDIAN);
2853}
2854
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002855uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002856{
Peter Maydell50013112015-04-26 16:49:24 +01002857 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002858}
2859
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002860uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002861{
Peter Maydell50013112015-04-26 16:49:24 +01002862 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002863}
2864
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002865uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002866{
Peter Maydell50013112015-04-26 16:49:24 +01002867 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002868}
2869
bellard84b7b8e2005-11-28 21:19:04 +00002870/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002871static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2872 MemTxAttrs attrs,
2873 MemTxResult *result,
2874 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002875{
bellard84b7b8e2005-11-28 21:19:04 +00002876 uint8_t *ptr;
2877 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002878 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002879 hwaddr l = 8;
2880 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002881 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002882 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002883
Paolo Bonzini41063e12015-03-18 14:21:43 +01002884 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002885 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002886 false);
2887 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002888 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002889
bellard84b7b8e2005-11-28 21:19:04 +00002890 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002891 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002892#if defined(TARGET_WORDS_BIGENDIAN)
2893 if (endian == DEVICE_LITTLE_ENDIAN) {
2894 val = bswap64(val);
2895 }
2896#else
2897 if (endian == DEVICE_BIG_ENDIAN) {
2898 val = bswap64(val);
2899 }
2900#endif
bellard84b7b8e2005-11-28 21:19:04 +00002901 } else {
2902 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002903 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002904 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002905 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002906 switch (endian) {
2907 case DEVICE_LITTLE_ENDIAN:
2908 val = ldq_le_p(ptr);
2909 break;
2910 case DEVICE_BIG_ENDIAN:
2911 val = ldq_be_p(ptr);
2912 break;
2913 default:
2914 val = ldq_p(ptr);
2915 break;
2916 }
Peter Maydell50013112015-04-26 16:49:24 +01002917 r = MEMTX_OK;
2918 }
2919 if (result) {
2920 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00002921 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002922 if (release_lock) {
2923 qemu_mutex_unlock_iothread();
2924 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002925 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00002926 return val;
2927}
2928
Peter Maydell50013112015-04-26 16:49:24 +01002929uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2930 MemTxAttrs attrs, MemTxResult *result)
2931{
2932 return address_space_ldq_internal(as, addr, attrs, result,
2933 DEVICE_NATIVE_ENDIAN);
2934}
2935
2936uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2937 MemTxAttrs attrs, MemTxResult *result)
2938{
2939 return address_space_ldq_internal(as, addr, attrs, result,
2940 DEVICE_LITTLE_ENDIAN);
2941}
2942
2943uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2944 MemTxAttrs attrs, MemTxResult *result)
2945{
2946 return address_space_ldq_internal(as, addr, attrs, result,
2947 DEVICE_BIG_ENDIAN);
2948}
2949
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002950uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002951{
Peter Maydell50013112015-04-26 16:49:24 +01002952 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002953}
2954
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002955uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002956{
Peter Maydell50013112015-04-26 16:49:24 +01002957 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002958}
2959
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002960uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002961{
Peter Maydell50013112015-04-26 16:49:24 +01002962 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002963}
2964
bellardaab33092005-10-30 20:48:42 +00002965/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01002966uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2967 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00002968{
2969 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01002970 MemTxResult r;
2971
2972 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2973 if (result) {
2974 *result = r;
2975 }
bellardaab33092005-10-30 20:48:42 +00002976 return val;
2977}
2978
Peter Maydell50013112015-04-26 16:49:24 +01002979uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2980{
2981 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2982}
2983
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002984/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002985static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2986 hwaddr addr,
2987 MemTxAttrs attrs,
2988 MemTxResult *result,
2989 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002990{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002991 uint8_t *ptr;
2992 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002993 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002994 hwaddr l = 2;
2995 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002996 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002997 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002998
Paolo Bonzini41063e12015-03-18 14:21:43 +01002999 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003000 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003001 false);
3002 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003003 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003004
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003005 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003006 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003007#if defined(TARGET_WORDS_BIGENDIAN)
3008 if (endian == DEVICE_LITTLE_ENDIAN) {
3009 val = bswap16(val);
3010 }
3011#else
3012 if (endian == DEVICE_BIG_ENDIAN) {
3013 val = bswap16(val);
3014 }
3015#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003016 } else {
3017 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003018 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003019 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003020 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003021 switch (endian) {
3022 case DEVICE_LITTLE_ENDIAN:
3023 val = lduw_le_p(ptr);
3024 break;
3025 case DEVICE_BIG_ENDIAN:
3026 val = lduw_be_p(ptr);
3027 break;
3028 default:
3029 val = lduw_p(ptr);
3030 break;
3031 }
Peter Maydell50013112015-04-26 16:49:24 +01003032 r = MEMTX_OK;
3033 }
3034 if (result) {
3035 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003036 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003037 if (release_lock) {
3038 qemu_mutex_unlock_iothread();
3039 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003040 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003041 return val;
bellardaab33092005-10-30 20:48:42 +00003042}
3043
Peter Maydell50013112015-04-26 16:49:24 +01003044uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3045 MemTxAttrs attrs, MemTxResult *result)
3046{
3047 return address_space_lduw_internal(as, addr, attrs, result,
3048 DEVICE_NATIVE_ENDIAN);
3049}
3050
3051uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3052 MemTxAttrs attrs, MemTxResult *result)
3053{
3054 return address_space_lduw_internal(as, addr, attrs, result,
3055 DEVICE_LITTLE_ENDIAN);
3056}
3057
3058uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3059 MemTxAttrs attrs, MemTxResult *result)
3060{
3061 return address_space_lduw_internal(as, addr, attrs, result,
3062 DEVICE_BIG_ENDIAN);
3063}
3064
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003065uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003066{
Peter Maydell50013112015-04-26 16:49:24 +01003067 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003068}
3069
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003070uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003071{
Peter Maydell50013112015-04-26 16:49:24 +01003072 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003073}
3074
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003075uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003076{
Peter Maydell50013112015-04-26 16:49:24 +01003077 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003078}
3079
bellard8df1cd02005-01-28 22:37:22 +00003080/* warning: addr must be aligned. The ram page is not marked as dirty
3081 and the code inside is not invalidated. It is useful if the dirty
3082 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003083void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3084 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003085{
bellard8df1cd02005-01-28 22:37:22 +00003086 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003087 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003088 hwaddr l = 4;
3089 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003090 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003091 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003092 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003093
Paolo Bonzini41063e12015-03-18 14:21:43 +01003094 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003095 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003096 true);
3097 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003098 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003099
Peter Maydell50013112015-04-26 16:49:24 +01003100 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003101 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003102 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003103 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003104 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003105
Paolo Bonzini845b6212015-03-23 11:45:53 +01003106 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3107 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003108 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003109 r = MEMTX_OK;
3110 }
3111 if (result) {
3112 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003113 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003114 if (release_lock) {
3115 qemu_mutex_unlock_iothread();
3116 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003117 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003118}
3119
Peter Maydell50013112015-04-26 16:49:24 +01003120void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3121{
3122 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3123}
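/* As the comment above notes, the _notdirty variant is intended for page
 * table updates performed by target MMU emulation: DIRTY_MEMORY_CODE is
 * deliberately left clear so the store never triggers translation-block
 * invalidation, while any other enabled dirty-logging bits (migration,
 * VGA) are still recorded. */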
3124
bellard8df1cd02005-01-28 22:37:22 +00003125/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003126static inline void address_space_stl_internal(AddressSpace *as,
3127 hwaddr addr, uint32_t val,
3128 MemTxAttrs attrs,
3129 MemTxResult *result,
3130 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003131{
bellard8df1cd02005-01-28 22:37:22 +00003132 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003133 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003134 hwaddr l = 4;
3135 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003136 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003137 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003138
Paolo Bonzini41063e12015-03-18 14:21:43 +01003139 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003140 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003141 true);
3142 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003143 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003144
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003145#if defined(TARGET_WORDS_BIGENDIAN)
3146 if (endian == DEVICE_LITTLE_ENDIAN) {
3147 val = bswap32(val);
3148 }
3149#else
3150 if (endian == DEVICE_BIG_ENDIAN) {
3151 val = bswap32(val);
3152 }
3153#endif
Peter Maydell50013112015-04-26 16:49:24 +01003154 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003155 } else {
bellard8df1cd02005-01-28 22:37:22 +00003156 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003157 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003158 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159 switch (endian) {
3160 case DEVICE_LITTLE_ENDIAN:
3161 stl_le_p(ptr, val);
3162 break;
3163 case DEVICE_BIG_ENDIAN:
3164 stl_be_p(ptr, val);
3165 break;
3166 default:
3167 stl_p(ptr, val);
3168 break;
3169 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003170 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003171 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003172 }
Peter Maydell50013112015-04-26 16:49:24 +01003173 if (result) {
3174 *result = r;
3175 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003176 if (release_lock) {
3177 qemu_mutex_unlock_iothread();
3178 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003179 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003180}
3181
3182void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3183 MemTxAttrs attrs, MemTxResult *result)
3184{
3185 address_space_stl_internal(as, addr, val, attrs, result,
3186 DEVICE_NATIVE_ENDIAN);
3187}
3188
3189void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3190 MemTxAttrs attrs, MemTxResult *result)
3191{
3192 address_space_stl_internal(as, addr, val, attrs, result,
3193 DEVICE_LITTLE_ENDIAN);
3194}
3195
3196void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3197 MemTxAttrs attrs, MemTxResult *result)
3198{
3199 address_space_stl_internal(as, addr, val, attrs, result,
3200 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003201}
3202
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003203void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003204{
Peter Maydell50013112015-04-26 16:49:24 +01003205 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003206}
3207
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003208void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003209{
Peter Maydell50013112015-04-26 16:49:24 +01003210 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003211}
3212
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003213void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003214{
Peter Maydell50013112015-04-26 16:49:24 +01003215 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003216}
3217
bellardaab33092005-10-30 20:48:42 +00003218/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003219void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3220 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003221{
3222 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003223 MemTxResult r;
3224
3225 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3226 if (result) {
3227 *result = r;
3228 }
3229}
3230
3231void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3232{
3233 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003234}
3235
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003236/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003237static inline void address_space_stw_internal(AddressSpace *as,
3238 hwaddr addr, uint32_t val,
3239 MemTxAttrs attrs,
3240 MemTxResult *result,
3241 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003242{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003243 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003244 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003245 hwaddr l = 2;
3246 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003247 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003248 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003249
Paolo Bonzini41063e12015-03-18 14:21:43 +01003250 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003251 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003252 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003253 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003254
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255#if defined(TARGET_WORDS_BIGENDIAN)
3256 if (endian == DEVICE_LITTLE_ENDIAN) {
3257 val = bswap16(val);
3258 }
3259#else
3260 if (endian == DEVICE_BIG_ENDIAN) {
3261 val = bswap16(val);
3262 }
3263#endif
Peter Maydell50013112015-04-26 16:49:24 +01003264 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003265 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003266 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003267 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003268 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003269 switch (endian) {
3270 case DEVICE_LITTLE_ENDIAN:
3271 stw_le_p(ptr, val);
3272 break;
3273 case DEVICE_BIG_ENDIAN:
3274 stw_be_p(ptr, val);
3275 break;
3276 default:
3277 stw_p(ptr, val);
3278 break;
3279 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003280 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003281 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003282 }
Peter Maydell50013112015-04-26 16:49:24 +01003283 if (result) {
3284 *result = r;
3285 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003286 if (release_lock) {
3287 qemu_mutex_unlock_iothread();
3288 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003289 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003290}
3291
3292void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3293 MemTxAttrs attrs, MemTxResult *result)
3294{
3295 address_space_stw_internal(as, addr, val, attrs, result,
3296 DEVICE_NATIVE_ENDIAN);
3297}
3298
3299void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3300 MemTxAttrs attrs, MemTxResult *result)
3301{
3302 address_space_stw_internal(as, addr, val, attrs, result,
3303 DEVICE_LITTLE_ENDIAN);
3304}
3305
3306void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3307 MemTxAttrs attrs, MemTxResult *result)
3308{
3309 address_space_stw_internal(as, addr, val, attrs, result,
3310 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003311}
3312
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003313void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003314{
Peter Maydell50013112015-04-26 16:49:24 +01003315 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003316}
3317
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003318void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003319{
Peter Maydell50013112015-04-26 16:49:24 +01003320 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003321}
3322
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003323void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003324{
Peter Maydell50013112015-04-26 16:49:24 +01003325 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003326}
3327
bellardaab33092005-10-30 20:48:42 +00003328/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003329void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3330 MemTxAttrs attrs, MemTxResult *result)
3331{
3332 MemTxResult r;
3333 val = tswap64(val);
3334 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3335 if (result) {
3336 *result = r;
3337 }
3338}
3339
3340void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3341 MemTxAttrs attrs, MemTxResult *result)
3342{
3343 MemTxResult r;
3344 val = cpu_to_le64(val);
3345 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3346 if (result) {
3347 *result = r;
3348 }
3349}

3350void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3351 MemTxAttrs attrs, MemTxResult *result)
3352{
3353 MemTxResult r;
3354 val = cpu_to_be64(val);
3355 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3356 if (result) {
3357 *result = r;
3358 }
3359}
3360
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003361void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003362{
Peter Maydell50013112015-04-26 16:49:24 +01003363 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003364}
3365
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003366void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003367{
Peter Maydell50013112015-04-26 16:49:24 +01003368 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003369}
3370
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003371void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003372{
Peter Maydell50013112015-04-26 16:49:24 +01003373 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003374}
3375
aliguori5e2972f2009-03-28 17:51:36 +00003376/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003377int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003378 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003379{
3380 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003381 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003382 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003383
3384 while (len > 0) {
3385 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003386 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003387 /* if no physical page mapped, return an error */
3388 if (phys_addr == -1)
3389 return -1;
3390 l = (page + TARGET_PAGE_SIZE) - addr;
3391 if (l > len)
3392 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003393 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003394 if (is_write) {
3395 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3396 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003397 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3398 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003399 }
bellard13eb76e2004-01-24 15:23:36 +00003400 len -= l;
3401 buf += l;
3402 addr += l;
3403 }
3404 return 0;
3405}
Paul Brooka68fe892010-03-01 00:08:59 +00003406#endif
bellard13eb76e2004-01-24 15:23:36 +00003407
Blue Swirl8e4a4242013-01-06 18:30:17 +00003408/*
3409 * A helper function for the _utterly broken_ virtio device model to find out if
3410 * it's running on a big endian machine. Don't do this at home kids!
3411 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003412bool target_words_bigendian(void);
3413bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003414{
3415#if defined(TARGET_WORDS_BIGENDIAN)
3416 return true;
3417#else
3418 return false;
3419#endif
3420}
3421
Wen Congyang76f35532012-05-07 12:04:18 +08003422#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003423bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003424{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003425 MemoryRegion*mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003426 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003427 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003428
Paolo Bonzini41063e12015-03-18 14:21:43 +01003429 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003430 mr = address_space_translate(&address_space_memory,
3431 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003432
Paolo Bonzini41063e12015-03-18 14:21:43 +01003433 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3434 rcu_read_unlock();
3435 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003436}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003437
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003438int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003439{
3440 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003441 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003442
Mike Day0dc3f442013-09-05 14:41:35 -04003443 rcu_read_lock();
3444 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003445 ret = func(block->idstr, block->host, block->offset,
3446 block->used_length, opaque);
3447 if (ret) {
3448 break;
3449 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003450 }
Mike Day0dc3f442013-09-05 14:41:35 -04003451 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003452 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003453}
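/* A minimal sketch of a RAMBlockIterFunc callback, assuming the parameter
 * order used in the loop above (the callback name and body are
 * illustrative only):
 *
 *     static int dump_block(const char *idstr, void *host_addr,
 *                           ram_addr_t offset, ram_addr_t length,
 *                           void *opaque)
 *     {
 *         printf("%s: host %p offset 0x%" PRIx64 " len 0x%" PRIx64 "\n",
 *                idstr, host_addr, (uint64_t)offset, (uint64_t)length);
 *         return 0;
 *     }
 *
 * Returning a non-zero value stops the iteration and is propagated back to
 * the caller; a call then looks like qemu_ram_foreach_block(dump_block, NULL). */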
Peter Maydellec3f8c92013-06-27 20:53:38 +01003454#endif