/*
 * Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
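/* These bits are stored in RAMBlock::flags and are consulted by the
 * qemu_ram_alloc_*() / qemu_ram_resize() helpers further down in this file
 * when a block is created, resized or freed.
 */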

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
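/* A PhysPageEntry thus encodes either an interior node (skip != 0, ptr
 * indexes PhysPageMap.nodes) or a leaf (skip == 0, ptr indexes
 * PhysPageMap.sections).  PHYS_MAP_NODE_NIL is the all-ones 26-bit ptr
 * value and marks an entry whose subtree has not been populated yet.
 */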

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
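/* Worked example: with ADDR_SPACE_BITS == 64 and (say) TARGET_PAGE_BITS == 12,
 * P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 == 6, i.e. six levels of 9 bits each,
 * which is enough to resolve the 52-bit page frame number.
 */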

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
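/* The four indexes above are reserved at the start of every PhysPageMap's
 * sections[] array; phys_page_find() falls back to PHYS_SECTION_UNASSIGNED
 * for addresses that have no mapping at all.
 */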

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
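    /* For example, a section at offset_within_address_space 0x1000 with
     * size.lo == 0x2000 (and size.hi == 0) covers addresses 0x1000..0x2fff.
     */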
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
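    /* mru_section is a single-entry cache; lookups run under RCU, so the
     * plain atomic_set() above is enough: concurrent readers see either the
     * old or the new pointer, and both stay valid until the dispatch map is
     * rebuilt.
     */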
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }
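    /* At this point mr is a terminal (non-IOMMU) region: either the final
     * target of the IOMMU chain walked above, or io_mem_unassigned if a
     * permission check failed along the way.
     */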

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
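    /* The (addr + len - 1) < addr test below catches ranges that wrap past
     * the top of the address space, e.g. addr == UINT64_MAX - 3 with
     * len == 8 for a 64-bit vaddr.
     */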
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

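    /* Two inclusive ranges [wp->vaddr, wpend] and [addr, addrend] overlap
     * exactly when neither one starts after the other one ends.
     */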
    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

1087/*
1088 * Set a custom physical guest memory alloator.
1089 * Accelerators with unusual needs may need this. Hopefully, we can
1090 * get rid of it eventually.
1091 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);
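    /* For example, with 4 KiB target pages the section number occupies the
     * low 12 bits of an iotlb entry, so at most 4096 distinct sections can
     * be encoded per address space.
     */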

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
1200 remain.offset_within_address_space += int128_get64(now.size);
1201 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001202 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001203 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001204 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001205 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001206 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001207 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001208 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001209 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001210 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001211 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001212 }
1213}
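/*
 * Worked example (illustrative, assuming 4 KiB target pages): a section at
 * guest-physical address 0x1800 with size 0x3000 is split by mem_add() into
 *   - a subpage for the unaligned head   [0x1800, 0x2000),
 *   - a multipage for the whole pages    [0x2000, 0x4000), and
 *   - a subpage for the partial tail     [0x4000, 0x4800).
 */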
1214
Sheng Yang62a27442010-01-26 19:21:16 +08001215void qemu_flush_coalesced_mmio_buffer(void)
1216{
1217 if (kvm_enabled())
1218 kvm_flush_coalesced_mmio_buffer();
1219}
1220
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001221void qemu_mutex_lock_ramlist(void)
1222{
1223 qemu_mutex_lock(&ram_list.mutex);
1224}
1225
1226void qemu_mutex_unlock_ramlist(void)
1227{
1228 qemu_mutex_unlock(&ram_list.mutex);
1229}
1230
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001231#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001232static void *file_ram_alloc(RAMBlock *block,
1233 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001234 const char *path,
1235 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001237 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001238 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001239 char *sanitized_name;
1240 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001241 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001242 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001243 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001244
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001245 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1246 error_setg(errp,
1247 "host lacks kvm mmu notifiers, -mem-path unsupported");
1248 return NULL;
1249 }
1250
1251 for (;;) {
1252 fd = open(path, O_RDWR);
1253 if (fd >= 0) {
1254 /* @path names an existing file, use it */
1255 break;
1256 }
1257 if (errno == ENOENT) {
1258 /* @path names a file that doesn't exist, create it */
1259 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1260 if (fd >= 0) {
1261 unlink_on_error = true;
1262 break;
1263 }
1264 } else if (errno == EISDIR) {
1265 /* @path names a directory, create a file there */
1266 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1267 sanitized_name = g_strdup(memory_region_name(block->mr));
1268 for (c = sanitized_name; *c != '\0'; c++) {
1269 if (*c == '/') {
1270 *c = '_';
1271 }
1272 }
1273
1274 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1275 sanitized_name);
1276 g_free(sanitized_name);
1277
1278 fd = mkstemp(filename);
1279 if (fd >= 0) {
1280 unlink(filename);
1281 g_free(filename);
1282 break;
1283 }
1284 g_free(filename);
1285 }
1286 if (errno != EEXIST && errno != EINTR) {
1287 error_setg_errno(errp, errno,
1288 "can't open backing store %s for guest RAM",
1289 path);
1290 goto error;
1291 }
1292 /*
1293 * Try again on EINTR and EEXIST. The latter happens when
1294 * something else creates the file between our two open() calls.
1295 */
1296 }
1297
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001298 page_size = qemu_fd_getpagesize(fd);
1299 block->mr->align = page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001300
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001301 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001302 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001303 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001304 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001305 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001306 }
1307
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001308 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001309
1310 /*
1311 * ftruncate is not supported by hugetlbfs in older
1312 * hosts, so don't bother bailing out on errors.
1313 * If anything goes wrong with it under other filesystems,
1314 * mmap will fail.
1315 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001316 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001317 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001318 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001319
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001320 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001322 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001323 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001324 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001325 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001326
1327 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001328 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001329 }
1330
Alex Williamson04b16652010-07-02 11:13:17 -06001331 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001332 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001333
1334error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001335 if (unlink_on_error) {
1336 unlink(path);
1337 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001338 if (fd != -1) {
1339 close(fd);
1340 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001341 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001342}
1343#endif
1344
Mike Day0dc3f442013-09-05 14:41:35 -04001345/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001346static ram_addr_t find_ram_offset(ram_addr_t size)
1347{
Alex Williamson04b16652010-07-02 11:13:17 -06001348 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001349 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001350
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001351 assert(size != 0); /* it would hand out the same offset multiple times */
1352
Mike Day0dc3f442013-09-05 14:41:35 -04001353 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001354 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001355 }
Alex Williamson04b16652010-07-02 11:13:17 -06001356
Mike Day0dc3f442013-09-05 14:41:35 -04001357 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001358 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001359
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001360 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001361
Mike Day0dc3f442013-09-05 14:41:35 -04001362 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001363 if (next_block->offset >= end) {
1364 next = MIN(next, next_block->offset);
1365 }
1366 }
1367 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001368 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001369 mingap = next - end;
1370 }
1371 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001372
1373 if (offset == RAM_ADDR_MAX) {
1374 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1375 (uint64_t)size);
1376 abort();
1377 }
1378
Alex Williamson04b16652010-07-02 11:13:17 -06001379 return offset;
1380}
1381
Juan Quintela652d7ec2012-07-20 10:37:54 +02001382ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001383{
Alex Williamsond17b5282010-06-25 11:08:38 -06001384 RAMBlock *block;
1385 ram_addr_t last = 0;
1386
Mike Day0dc3f442013-09-05 14:41:35 -04001387 rcu_read_lock();
1388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001389 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001390 }
Mike Day0dc3f442013-09-05 14:41:35 -04001391 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001392 return last;
1393}
1394
Jason Baronddb97f12012-08-02 15:44:16 -04001395static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1396{
1397 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001398
1399 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001400 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001401 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1402 if (ret) {
1403 perror("qemu_madvise");
1404 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1405 "but dump_guest_core=off specified\n");
1406 }
1407 }
1408}
1409
Mike Day0dc3f442013-09-05 14:41:35 -04001410/* Called within an RCU critical section, or while the ramlist lock
1411 * is held.
1412 */
Hu Tao20cfe882014-04-02 15:13:26 +08001413static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001414{
Hu Tao20cfe882014-04-02 15:13:26 +08001415 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001416
Mike Day0dc3f442013-09-05 14:41:35 -04001417 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001418 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001419 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001420 }
1421 }
Hu Tao20cfe882014-04-02 15:13:26 +08001422
1423 return NULL;
1424}
1425
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001426const char *qemu_ram_get_idstr(RAMBlock *rb)
1427{
1428 return rb->idstr;
1429}
1430
Mike Dayae3a7042013-09-05 14:41:35 -04001431/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001432void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1433{
Mike Dayae3a7042013-09-05 14:41:35 -04001434 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001435
Mike Day0dc3f442013-09-05 14:41:35 -04001436 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001437 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001438 assert(new_block);
1439 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001440
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001441 if (dev) {
1442 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001443 if (id) {
1444 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001445 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001446 }
1447 }
1448 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1449
Mike Day0dc3f442013-09-05 14:41:35 -04001450 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001451 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001452 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1453 new_block->idstr);
1454 abort();
1455 }
1456 }
Mike Day0dc3f442013-09-05 14:41:35 -04001457 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001458}
1459
Mike Dayae3a7042013-09-05 14:41:35 -04001460/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001461void qemu_ram_unset_idstr(ram_addr_t addr)
1462{
Mike Dayae3a7042013-09-05 14:41:35 -04001463 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001464
Mike Dayae3a7042013-09-05 14:41:35 -04001465 /* FIXME: arch_init.c assumes that this is not called throughout
1466 * migration. Ignore the problem since hot-unplug during migration
1467 * does not work anyway.
1468 */
1469
Mike Day0dc3f442013-09-05 14:41:35 -04001470 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001471 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001472 if (block) {
1473 memset(block->idstr, 0, sizeof(block->idstr));
1474 }
Mike Day0dc3f442013-09-05 14:41:35 -04001475 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001476}
1477
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001478static int memory_try_enable_merging(void *addr, size_t len)
1479{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001480 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001481 /* disabled by the user */
1482 return 0;
1483 }
1484
1485 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1486}
1487
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001488/* Only legal before the guest might have detected the memory size: e.g. on
1489 * incoming migration, or right after reset.
1490 *
1491 * As the memory core doesn't know how the memory is accessed, it is up to
1492 * the resize callback to update device state and/or add assertions to detect
1493 * misuse, if necessary.
1494 */
1495int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1496{
1497 RAMBlock *block = find_ram_block(base);
1498
1499 assert(block);
1500
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001501 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001502
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001503 if (block->used_length == newsize) {
1504 return 0;
1505 }
1506
1507 if (!(block->flags & RAM_RESIZEABLE)) {
1508 error_setg_errno(errp, EINVAL,
1509 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1510 " in != 0x" RAM_ADDR_FMT, block->idstr,
1511 newsize, block->used_length);
1512 return -EINVAL;
1513 }
1514
1515 if (block->max_length < newsize) {
1516 error_setg_errno(errp, EINVAL,
1517 "Length too large: %s: 0x" RAM_ADDR_FMT
1518 " > 0x" RAM_ADDR_FMT, block->idstr,
1519 newsize, block->max_length);
1520 return -EINVAL;
1521 }
1522
1523 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1524 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001525 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1526 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001527 memory_region_set_size(block->mr, newsize);
1528 if (block->resized) {
1529 block->resized(block->idstr, newsize, block->host);
1530 }
1531 return 0;
1532}
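/*
 * Hedged usage sketch (illustrative names, not an existing caller): a block
 * created with RAM_RESIZEABLE can have its used_length grown or shrunk up
 * to max_length, typically on incoming migration:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block->offset, new_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 */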
1533
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001534/* Called with ram_list.mutex held */
1535static void dirty_memory_extend(ram_addr_t old_ram_size,
1536 ram_addr_t new_ram_size)
1537{
1538 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1539 DIRTY_MEMORY_BLOCK_SIZE);
1540 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1541 DIRTY_MEMORY_BLOCK_SIZE);
1542 int i;
1543
1544 /* Only need to extend if block count increased */
1545 if (new_num_blocks <= old_num_blocks) {
1546 return;
1547 }
1548
1549 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1550 DirtyMemoryBlocks *old_blocks;
1551 DirtyMemoryBlocks *new_blocks;
1552 int j;
1553
1554 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1555 new_blocks = g_malloc(sizeof(*new_blocks) +
1556 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1557
1558 if (old_num_blocks) {
1559 memcpy(new_blocks->blocks, old_blocks->blocks,
1560 old_num_blocks * sizeof(old_blocks->blocks[0]));
1561 }
1562
1563 for (j = old_num_blocks; j < new_num_blocks; j++) {
1564 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1565 }
1566
1567 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1568
1569 if (old_blocks) {
1570 g_free_rcu(old_blocks, rcu);
1571 }
1572 }
1573}
1574
Fam Zheng528f46a2016-03-01 14:18:18 +08001575static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001576{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001578 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001579 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001580 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001581
1582 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001583
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001584 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001585 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001586
1587 if (!new_block->host) {
1588 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001589 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001590 new_block->mr, &err);
1591 if (err) {
1592 error_propagate(errp, err);
1593 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001594 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001595 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001596 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001597 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001598 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001599 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001600 error_setg_errno(errp, errno,
1601 "cannot set up guest memory '%s'",
1602 memory_region_name(new_block->mr));
1603 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001604 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001605 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001606 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001607 }
1608 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001609
Li Zhijiandd631692015-07-02 20:18:06 +08001610 new_ram_size = MAX(old_ram_size,
1611 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1612 if (new_ram_size > old_ram_size) {
1613 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001614 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001615 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001616 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1617 * QLIST (which has an RCU-friendly variant) does not have insertion at
1618 * tail, so save the last element in last_block.
1619 */
Mike Day0dc3f442013-09-05 14:41:35 -04001620 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001621 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001622 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001623 break;
1624 }
1625 }
1626 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001627 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001628 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001629 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001630 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001631 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001632 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001633 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001634
Mike Day0dc3f442013-09-05 14:41:35 -04001635 /* Write list before version */
1636 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001637 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001638 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001639
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001640 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001641 new_block->used_length,
1642 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001643
Paolo Bonzinia904c912015-01-21 16:18:35 +01001644 if (new_block->host) {
1645 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1646 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1647 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1648 if (kvm_enabled()) {
1649 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1650 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001651 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001652}
1653
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001654#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001655RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1656 bool share, const char *mem_path,
1657 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001658{
1659 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001660 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001661
1662 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001663 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001664 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001665 }
1666
1667 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1668 /*
1669 * file_ram_alloc() needs to allocate just like
1670 * phys_mem_alloc, but we haven't bothered to provide
1671 * a hook there.
1672 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001673 error_setg(errp,
1674 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001675 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001676 }
1677
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001678 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001679 new_block = g_malloc0(sizeof(*new_block));
1680 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001681 new_block->used_length = size;
1682 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001683 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001684 new_block->host = file_ram_alloc(new_block, size,
1685 mem_path, errp);
1686 if (!new_block->host) {
1687 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001688 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001689 }
1690
Fam Zheng528f46a2016-03-01 14:18:18 +08001691 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001692 if (local_err) {
1693 g_free(new_block);
1694 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001695 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001696 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001697 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001698}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001699#endif
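/*
 * Hedged sketch (illustrative values): board or backend code normally
 * reaches this path through the memory region API rather than calling it
 * directly, e.g.:
 *
 *     memory_region_init_ram_from_file(mr, owner, "example.ram",
 *                                      64 * 1024 * 1024, true,
 *                                      "/dev/hugepages/guest", &error_fatal);
 */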
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001700
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001701static
Fam Zheng528f46a2016-03-01 14:18:18 +08001702RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1703 void (*resized)(const char*,
1704 uint64_t length,
1705 void *host),
1706 void *host, bool resizeable,
1707 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001708{
1709 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001710 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001711
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001712 size = HOST_PAGE_ALIGN(size);
1713 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001714 new_block = g_malloc0(sizeof(*new_block));
1715 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001716 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001717 new_block->used_length = size;
1718 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001719 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001720 new_block->fd = -1;
1721 new_block->host = host;
1722 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001723 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001724 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001725 if (resizeable) {
1726 new_block->flags |= RAM_RESIZEABLE;
1727 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001728 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001729 if (local_err) {
1730 g_free(new_block);
1731 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001732 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001733 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001734 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001735}
1736
Fam Zheng528f46a2016-03-01 14:18:18 +08001737RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001738 MemoryRegion *mr, Error **errp)
1739{
1740 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1741}
1742
Fam Zheng528f46a2016-03-01 14:18:18 +08001743RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001744{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001745 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1746}
1747
Fam Zheng528f46a2016-03-01 14:18:18 +08001748RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001749 void (*resized)(const char*,
1750 uint64_t length,
1751 void *host),
1752 MemoryRegion *mr, Error **errp)
1753{
1754 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001755}
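/*
 * Hedged sketch (illustrative names): this variant backs
 * memory_region_init_resizeable_ram(); callers pass the initial and maximum
 * sizes plus a resize notification callback up front, e.g.:
 *
 *     memory_region_init_resizeable_ram(mr, owner, "example.rsz",
 *                                       used_size, max_size,
 *                                       my_resized_cb, &error_fatal);
 */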
bellarde9a1ab12007-02-08 23:08:38 +00001756
Paolo Bonzini43771532013-09-09 17:58:40 +02001757static void reclaim_ramblock(RAMBlock *block)
1758{
1759 if (block->flags & RAM_PREALLOC) {
1760 ;
1761 } else if (xen_enabled()) {
1762 xen_invalidate_map_cache_entry(block->host);
1763#ifndef _WIN32
1764 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001765 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001766 close(block->fd);
1767#endif
1768 } else {
1769 qemu_anon_ram_free(block->host, block->max_length);
1770 }
1771 g_free(block);
1772}
1773
Fam Zhengf1060c52016-03-01 14:18:22 +08001774void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001775{
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001776 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001777 QLIST_REMOVE_RCU(block, next);
1778 ram_list.mru_block = NULL;
1779 /* Write list before version */
1780 smp_wmb();
1781 ram_list.version++;
1782 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001783 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001784}
1785
Huang Yingcd19cfa2011-03-02 08:56:19 +01001786#ifndef _WIN32
1787void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1788{
1789 RAMBlock *block;
1790 ram_addr_t offset;
1791 int flags;
1792 void *area, *vaddr;
1793
Mike Day0dc3f442013-09-05 14:41:35 -04001794 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001795 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001796 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001797 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001798 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001800 } else if (xen_enabled()) {
1801 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001802 } else {
1803 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001804 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001805 flags |= (block->flags & RAM_SHARED ?
1806 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001807 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1808 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001809 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001810 /*
1811 * Remap needs to match alloc. Accelerators that
1812 * set phys_mem_alloc never remap. If they did,
1813 * we'd need a remap hook here.
1814 */
1815 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1816
Huang Yingcd19cfa2011-03-02 08:56:19 +01001817 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1818 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1819 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001820 }
1821 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001822 fprintf(stderr, "Could not remap addr: "
1823 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001824 length, addr);
1825 exit(1);
1826 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001827 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001828 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001829 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 }
1831 }
1832}
1833#endif /* !_WIN32 */
1834
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001835int qemu_get_ram_fd(ram_addr_t addr)
1836{
Mike Dayae3a7042013-09-05 14:41:35 -04001837 RAMBlock *block;
1838 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001839
Mike Day0dc3f442013-09-05 14:41:35 -04001840 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001841 block = qemu_get_ram_block(addr);
1842 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001843 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001844 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001845}
1846
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001847void qemu_set_ram_fd(ram_addr_t addr, int fd)
1848{
1849 RAMBlock *block;
1850
1851 rcu_read_lock();
1852 block = qemu_get_ram_block(addr);
1853 block->fd = fd;
1854 rcu_read_unlock();
1855}
1856
Damjan Marion3fd74b82014-06-26 23:01:32 +02001857void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1858{
Mike Dayae3a7042013-09-05 14:41:35 -04001859 RAMBlock *block;
1860 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001861
Mike Day0dc3f442013-09-05 14:41:35 -04001862 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001863 block = qemu_get_ram_block(addr);
1864 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001865 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001866 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001867}
1868
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001869/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001870 * This should not be used for general purpose DMA. Use address_space_map
1871 * or address_space_rw instead. For local memory (e.g. video ram) that the
1872 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001873 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001874 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001875 */
Gonglei3655cb92016-02-20 10:35:20 +08001876void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001877{
Gonglei3655cb92016-02-20 10:35:20 +08001878 RAMBlock *block = ram_block;
1879
1880 if (block == NULL) {
1881 block = qemu_get_ram_block(addr);
1882 }
Mike Dayae3a7042013-09-05 14:41:35 -04001883
1884 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001885 /* We need to check if the requested address is in the RAM
1886 * because we don't want to map the entire memory in QEMU.
1887 * In that case just map until the end of the page.
1888 */
1889 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001890 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001891 }
Mike Dayae3a7042013-09-05 14:41:35 -04001892
1893 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001894 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001895 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001896}
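/*
 * Hedged sketch (illustrative names): for general purpose DMA, the
 * address_space_map()/address_space_unmap() pair mentioned in the comment
 * above is what callers are expected to use instead of raw host pointers:
 *
 *     hwaddr len = size;
 *     void *buf = address_space_map(as, gpa, &len, is_write);
 *     if (buf) {
 *         ... access up to len bytes ...
 *         address_space_unmap(as, buf, len, is_write, access_len);
 *     }
 */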
1897
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001898/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001899 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001900 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001901 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001902 */
Gonglei3655cb92016-02-20 10:35:20 +08001903static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1904 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001905{
Gonglei3655cb92016-02-20 10:35:20 +08001906 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001907 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001908 if (*size == 0) {
1909 return NULL;
1910 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001911
Gonglei3655cb92016-02-20 10:35:20 +08001912 if (block == NULL) {
1913 block = qemu_get_ram_block(addr);
1914 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001915 offset_inside_block = addr - block->offset;
1916 *size = MIN(*size, block->max_length - offset_inside_block);
1917
1918 if (xen_enabled() && block->host == NULL) {
1919 /* We need to check if the requested address is in the RAM
1920 * because we don't want to map the entire memory in QEMU.
1921 * In that case just map the requested area.
1922 */
1923 if (block->offset == 0) {
1924 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001925 }
1926
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001927 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001928 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001929
1930 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001931}
1932
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001933/*
1934 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1935 * in that RAMBlock.
1936 *
1937 * ptr: Host pointer to look up
1938 * round_offset: If true round the result offset down to a page boundary
1939 * *ram_addr: set to result ram_addr
1940 * *offset: set to result offset within the RAMBlock
1941 *
1942 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001943 *
1944 * By the time this function returns, the returned pointer is not protected
1945 * by RCU anymore. If the caller is not within an RCU critical section and
1946 * does not hold the iothread lock, it must have other means of protecting the
1947 * pointer, such as a reference to the region that includes the incoming
1948 * ram_addr_t.
1949 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001950RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1951 ram_addr_t *ram_addr,
1952 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001953{
pbrook94a6b542009-04-11 17:15:54 +00001954 RAMBlock *block;
1955 uint8_t *host = ptr;
1956
Jan Kiszka868bb332011-06-21 22:59:09 +02001957 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001958 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001959 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001960 block = qemu_get_ram_block(*ram_addr);
1961 if (block) {
1962 *offset = (host - block->host);
1963 }
Mike Day0dc3f442013-09-05 14:41:35 -04001964 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001965 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001966 }
1967
Mike Day0dc3f442013-09-05 14:41:35 -04001968 rcu_read_lock();
1969 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001970 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001971 goto found;
1972 }
1973
Mike Day0dc3f442013-09-05 14:41:35 -04001974 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001975 /* This case appears when the block is not mapped. */
1976 if (block->host == NULL) {
1977 continue;
1978 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001979 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001980 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001981 }
pbrook94a6b542009-04-11 17:15:54 +00001982 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001983
Mike Day0dc3f442013-09-05 14:41:35 -04001984 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001985 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001986
1987found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001988 *offset = (host - block->host);
1989 if (round_offset) {
1990 *offset &= TARGET_PAGE_MASK;
1991 }
1992 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001993 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001994 return block;
1995}
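/*
 * Hedged usage sketch (illustrative variable names): map a host pointer
 * back to its RAMBlock and page-aligned guest RAM address:
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host, true, &ram_addr, &offset);
 *     if (rb) {
 *         ... offset is the page-aligned offset within rb,
 *             ram_addr the matching global ram_addr_t ...
 *     }
 */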
1996
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001997/*
1998 * Finds the named RAMBlock
1999 *
2000 * name: The name of RAMBlock to find
2001 *
2002 * Returns: RAMBlock (or NULL if not found)
2003 */
2004RAMBlock *qemu_ram_block_by_name(const char *name)
2005{
2006 RAMBlock *block;
2007
2008 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2009 if (!strcmp(name, block->idstr)) {
2010 return block;
2011 }
2012 }
2013
2014 return NULL;
2015}
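/*
 * Hedged usage sketch (illustrative): migration code can match an incoming
 * block name against the local RAMBlock list; the caller is expected to
 * hold the RCU read lock or the ramlist lock around the lookup:
 *
 *     rcu_read_lock();
 *     RAMBlock *rb = qemu_ram_block_by_name(block_name);
 *     if (!rb) {
 *         error_report("unknown RAM block '%s'", block_name);
 *     }
 *     rcu_read_unlock();
 */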
2016
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002017/* Some of the softmmu routines need to translate from a host pointer
2018 (typically a TLB entry) back to a ram offset. */
2019MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2020{
2021 RAMBlock *block;
2022 ram_addr_t offset; /* Not used */
2023
2024 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2025
2026 if (!block) {
2027 return NULL;
2028 }
2029
2030 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002031}
Alex Williamsonf471a172010-06-11 11:11:42 -06002032
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002033/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002034static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002035 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002036{
Juan Quintela52159192013-10-08 12:44:04 +02002037 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002038 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002039 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002040 switch (size) {
2041 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002042 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002043 break;
2044 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002045 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002046 break;
2047 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002048 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002049 break;
2050 default:
2051 abort();
2052 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002053 /* Set both VGA and migration bits for simplicity and to remove
2054 * the notdirty callback faster.
2055 */
2056 cpu_physical_memory_set_dirty_range(ram_addr, size,
2057 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002058 /* we remove the notdirty callback only if the code has been
2059 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002060 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002061 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002062 }
bellard1ccde1c2004-02-06 19:46:14 +00002063}
2064
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002065static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2066 unsigned size, bool is_write)
2067{
2068 return is_write;
2069}
2070
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002071static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002072 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002073 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002074 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002075};
2076
pbrook0f459d12008-06-09 00:20:13 +00002077/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002078static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002079{
Andreas Färber93afead2013-08-26 03:41:01 +02002080 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002081 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002082 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002083 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002084 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002085 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002086 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002087
Andreas Färberff4700b2013-08-26 18:23:18 +02002088 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002089 /* We re-entered the check after replacing the TB. Now raise
2090 * the debug interrupt so that it will trigger after the
2091 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002092 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002093 return;
2094 }
Andreas Färber93afead2013-08-26 03:41:01 +02002095 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002096 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002097 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2098 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002099 if (flags == BP_MEM_READ) {
2100 wp->flags |= BP_WATCHPOINT_HIT_READ;
2101 } else {
2102 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2103 }
2104 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002105 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002106 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002107 if (wp->flags & BP_CPU &&
2108 !cc->debug_check_watchpoint(cpu, wp)) {
2109 wp->flags &= ~BP_WATCHPOINT_HIT;
2110 continue;
2111 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002112 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002113 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002114 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002115 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002116 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002117 } else {
2118 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002119 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002120 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002121 }
aliguori06d55cc2008-11-18 20:24:06 +00002122 }
aliguori6e140f22008-11-18 20:37:55 +00002123 } else {
2124 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002125 }
2126 }
2127}
2128
pbrook6658ffb2007-03-16 23:58:11 +00002129/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2130 so these check for a hit then pass through to the normal out-of-line
2131 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002132static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2133 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002134{
Peter Maydell66b9b432015-04-26 16:49:24 +01002135 MemTxResult res;
2136 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002137 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2138 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002139
Peter Maydell66b9b432015-04-26 16:49:24 +01002140 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002141 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002142 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002143 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002144 break;
2145 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002146 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002147 break;
2148 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002149 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002150 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002151 default: abort();
2152 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002153 *pdata = data;
2154 return res;
2155}
2156
2157static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2158 uint64_t val, unsigned size,
2159 MemTxAttrs attrs)
2160{
2161 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002162 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2163 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002164
2165 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2166 switch (size) {
2167 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002168 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002169 break;
2170 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002171 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002172 break;
2173 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002174 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002175 break;
2176 default: abort();
2177 }
2178 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002179}
2180
Avi Kivity1ec9b902012-01-02 12:47:48 +02002181static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002182 .read_with_attrs = watch_mem_read,
2183 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002184 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002185};
pbrook6658ffb2007-03-16 23:58:11 +00002186
Peter Maydellf25a49e2015-04-26 16:49:24 +01002187static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2188 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002189{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002190 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002191 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002192 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002193
blueswir1db7b5422007-05-26 17:36:03 +00002194#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002195 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002196 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002197#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002198 res = address_space_read(subpage->as, addr + subpage->base,
2199 attrs, buf, len);
2200 if (res) {
2201 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002202 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002203 switch (len) {
2204 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002205 *data = ldub_p(buf);
2206 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002207 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002208 *data = lduw_p(buf);
2209 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002210 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002211 *data = ldl_p(buf);
2212 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002213 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002214 *data = ldq_p(buf);
2215 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002216 default:
2217 abort();
2218 }
blueswir1db7b5422007-05-26 17:36:03 +00002219}
2220
Peter Maydellf25a49e2015-04-26 16:49:24 +01002221static MemTxResult subpage_write(void *opaque, hwaddr addr,
2222 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002223{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002224 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002225 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002226
blueswir1db7b5422007-05-26 17:36:03 +00002227#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002228 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002229 " value %"PRIx64"\n",
2230 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002231#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002232 switch (len) {
2233 case 1:
2234 stb_p(buf, value);
2235 break;
2236 case 2:
2237 stw_p(buf, value);
2238 break;
2239 case 4:
2240 stl_p(buf, value);
2241 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002242 case 8:
2243 stq_p(buf, value);
2244 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002245 default:
2246 abort();
2247 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002248 return address_space_write(subpage->as, addr + subpage->base,
2249 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002250}
2251
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002252static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002253 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002254{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002255 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002256#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002257 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002258 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002259#endif
2260
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002261 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002262 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002263}
2264
Avi Kivity70c68e42012-01-02 12:32:48 +02002265static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002266 .read_with_attrs = subpage_read,
2267 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002268 .impl.min_access_size = 1,
2269 .impl.max_access_size = 8,
2270 .valid.min_access_size = 1,
2271 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002272 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002273 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002274};
2275
Anthony Liguoric227f092009-10-01 16:12:16 -05002276static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002277 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002278{
2279 int idx, eidx;
2280
2281 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2282 return -1;
2283 idx = SUBPAGE_IDX(start);
2284 eidx = SUBPAGE_IDX(end);
2285#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002286 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2287 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002288#endif
blueswir1db7b5422007-05-26 17:36:03 +00002289 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002290 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002291 }
2292
2293 return 0;
2294}
2295
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002296static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002297{
Anthony Liguoric227f092009-10-01 16:12:16 -05002298 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002299
Anthony Liguori7267c092011-08-20 22:09:37 -05002300 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002301
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002302 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002303 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002304 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002305 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002306 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002307#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002308 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2309 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002310#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002311 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002312
2313 return mmio;
2314}
2315
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002316static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2317 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002318{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002319 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002320 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002321 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002322 .mr = mr,
2323 .offset_within_address_space = 0,
2324 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002325 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002326 };
2327
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002328 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002329}
2330
Peter Maydella54c87b2016-01-21 14:15:05 +00002331MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002332{
Peter Maydella54c87b2016-01-21 14:15:05 +00002333 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2334 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002335 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002336 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002337
2338 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002339}
2340
Avi Kivitye9179ce2009-06-14 11:38:52 +03002341static void io_mem_init(void)
2342{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002343 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002344 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002345 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002346 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002347 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002348 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002349 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002350}
2351
Avi Kivityac1970f2012-10-03 16:22:53 +02002352static void mem_begin(MemoryListener *listener)
2353{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002354 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002355 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2356 uint16_t n;
2357
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002358 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002359 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002360 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002361 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002362 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002363 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002364 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002365 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002366
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002367 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002368 d->as = as;
2369 as->next_dispatch = d;
2370}
2371
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002372static void address_space_dispatch_free(AddressSpaceDispatch *d)
2373{
2374 phys_sections_free(&d->map);
2375 g_free(d);
2376}
2377
Paolo Bonzini00752702013-05-29 12:13:54 +02002378static void mem_commit(MemoryListener *listener)
2379{
2380 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002381 AddressSpaceDispatch *cur = as->dispatch;
2382 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002383
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002384 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002385
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002386 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002387 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002388 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002389 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002390}
2391
Avi Kivity1d711482012-10-02 18:54:45 +02002392static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002393{
Peter Maydell32857f42015-10-01 15:29:50 +01002394 CPUAddressSpace *cpuas;
2395 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002396
2397 /* Since each CPU stores RAM addresses in its TLB cache, we must
2398 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002399 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2400 cpu_reloading_memory_map();
2401 /* The CPU and TLB are protected by the iothread lock.
2402 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2403 * may have split the RCU critical section.
2404 */
2405 d = atomic_rcu_read(&cpuas->as->dispatch);
2406 cpuas->memory_dispatch = d;
2407 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002408}
2409
Avi Kivityac1970f2012-10-03 16:22:53 +02002410void address_space_init_dispatch(AddressSpace *as)
2411{
Paolo Bonzini00752702013-05-29 12:13:54 +02002412 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002413 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002414 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002415 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002416 .region_add = mem_add,
2417 .region_nop = mem_add,
2418 .priority = 0,
2419 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002420 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002421}
2422
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002423void address_space_unregister(AddressSpace *as)
2424{
2425 memory_listener_unregister(&as->dispatch_listener);
2426}
2427
Avi Kivity83f3c252012-10-07 12:59:55 +02002428void address_space_destroy_dispatch(AddressSpace *as)
2429{
2430 AddressSpaceDispatch *d = as->dispatch;
2431
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002432 atomic_rcu_set(&as->dispatch, NULL);
2433 if (d) {
2434 call_rcu(d, address_space_dispatch_free, rcu);
2435 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002436}
2437
Avi Kivity62152b82011-07-26 14:26:14 +03002438static void memory_map_init(void)
2439{
Anthony Liguori7267c092011-08-20 22:09:37 -05002440 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002441
Paolo Bonzini57271d62013-11-07 17:14:37 +01002442 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002443 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002444
Anthony Liguori7267c092011-08-20 22:09:37 -05002445 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002446 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2447 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002448 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002449}
2450
2451MemoryRegion *get_system_memory(void)
2452{
2453 return system_memory;
2454}
2455
Avi Kivity309cb472011-08-08 16:09:03 +03002456MemoryRegion *get_system_io(void)
2457{
2458 return system_io;
2459}
2460
pbrooke2eef172008-06-08 01:09:01 +00002461#endif /* !defined(CONFIG_USER_ONLY) */
2462
bellard13eb76e2004-01-24 15:23:36 +00002463/* physical memory access (slow version, mainly for debug) */
2464#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002465int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002466 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002467{
2468 int l, flags;
2469 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002470 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002471
2472 while (len > 0) {
2473 page = addr & TARGET_PAGE_MASK;
2474 l = (page + TARGET_PAGE_SIZE) - addr;
2475 if (l > len)
2476 l = len;
2477 flags = page_get_flags(page);
2478 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002479 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002480 if (is_write) {
2481 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002482 return -1;
bellard579a97f2007-11-11 14:26:47 +00002483 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002484 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002485 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002486 memcpy(p, buf, l);
2487 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002488 } else {
2489 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002490 return -1;
bellard579a97f2007-11-11 14:26:47 +00002491 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002492 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002493 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002494 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002495 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002496 }
2497 len -= l;
2498 buf += l;
2499 addr += l;
2500 }
Paul Brooka68fe892010-03-01 00:08:59 +00002501 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002502}
bellard8df1cd02005-01-28 22:37:22 +00002503
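In a user-mode-only build this slow path is what the gdbstub and similar debug code use to read or write guest memory through the host page tables. A minimal sketch of a caller, assuming only the declaration above (the helper name is invented):

/* Hypothetical debug helper: fetch bytes from guest memory (user-mode). */
static int debug_fetch_bytes(CPUState *cpu, target_ulong guest_addr,
                             uint8_t *out, int len)
{
    /* is_write == 0 selects a read; returns -1 if any page in the range
     * is unmapped or lacks PAGE_READ. */
    return cpu_memory_rw_debug(cpu, guest_addr, out, len, 0);
}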
bellard13eb76e2004-01-24 15:23:36 +00002504#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002505
Paolo Bonzini845b6212015-03-23 11:45:53 +01002506static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002507 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002508{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002509 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2510 /* No early return if dirty_log_mask is or becomes 0, because
2511 * cpu_physical_memory_set_dirty_range will still call
2512 * xen_modified_memory.
2513 */
2514 if (dirty_log_mask) {
2515 dirty_log_mask =
2516 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002517 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002518 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2519 tb_invalidate_phys_range(addr, addr + length);
2520 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2521 }
2522 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002523}
2524
Richard Henderson23326162013-07-08 14:55:59 -07002525static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002526{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002527 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002528
2529 /* Regions are assumed to support 1-4 byte accesses unless
2530 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002531 if (access_size_max == 0) {
2532 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002533 }
Richard Henderson23326162013-07-08 14:55:59 -07002534
2535 /* Bound the maximum access by the alignment of the address. */
2536 if (!mr->ops->impl.unaligned) {
2537 unsigned align_size_max = addr & -addr;
2538 if (align_size_max != 0 && align_size_max < access_size_max) {
2539 access_size_max = align_size_max;
2540 }
2541 }
2542
2543 /* Don't attempt accesses larger than the maximum. */
2544 if (l > access_size_max) {
2545 l = access_size_max;
2546 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002547 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002548
2549 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002550}
2551
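A quick worked example of the clamping above, with illustrative numbers rather than values from any particular device: for a region whose ops declare valid.max_access_size = 4 and which does not allow unaligned accesses, an 8-byte access at an address ending in 0x4 is first clamped to 4 by the region limit; the alignment term addr & -addr is also 4, so pow2floor() leaves l at 4 and the caller's loop issues two 4-byte dispatches. At an address ending in 0x2 the alignment term is 2, so the first chunk is limited to 2 bytes before the loop continues from the now 4-aligned address.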
Jan Kiszka4840f102015-06-18 18:47:22 +02002552static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002553{
Jan Kiszka4840f102015-06-18 18:47:22 +02002554 bool unlocked = !qemu_mutex_iothread_locked();
2555 bool release_lock = false;
2556
2557 if (unlocked && mr->global_locking) {
2558 qemu_mutex_lock_iothread();
2559 unlocked = false;
2560 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002561 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002562 if (mr->flush_coalesced_mmio) {
2563 if (unlocked) {
2564 qemu_mutex_lock_iothread();
2565 }
2566 qemu_flush_coalesced_mmio_buffer();
2567 if (unlocked) {
2568 qemu_mutex_unlock_iothread();
2569 }
2570 }
2571
2572 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002573}
2574
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002575/* Called within RCU critical section. */
2576static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2577 MemTxAttrs attrs,
2578 const uint8_t *buf,
2579 int len, hwaddr addr1,
2580 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002581{
bellard13eb76e2004-01-24 15:23:36 +00002582 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002583 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002584 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002585 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002586
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002587 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002588 if (!memory_access_is_direct(mr, true)) {
2589 release_lock |= prepare_mmio_access(mr);
2590 l = memory_access_size(mr, l, addr1);
2591 /* XXX: could force current_cpu to NULL to avoid
2592 potential bugs */
2593 switch (l) {
2594 case 8:
2595 /* 64 bit write access */
2596 val = ldq_p(buf);
2597 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2598 attrs);
2599 break;
2600 case 4:
2601 /* 32 bit write access */
2602 val = ldl_p(buf);
2603 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2604 attrs);
2605 break;
2606 case 2:
2607 /* 16 bit write access */
2608 val = lduw_p(buf);
2609 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2610 attrs);
2611 break;
2612 case 1:
2613 /* 8 bit write access */
2614 val = ldub_p(buf);
2615 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2616 attrs);
2617 break;
2618 default:
2619 abort();
bellard13eb76e2004-01-24 15:23:36 +00002620 }
2621 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002622 addr1 += memory_region_get_ram_addr(mr);
2623 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002624 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002625 memcpy(ptr, buf, l);
2626 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002627 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002628
2629 if (release_lock) {
2630 qemu_mutex_unlock_iothread();
2631 release_lock = false;
2632 }
2633
bellard13eb76e2004-01-24 15:23:36 +00002634 len -= l;
2635 buf += l;
2636 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002637
2638 if (!len) {
2639 break;
2640 }
2641
2642 l = len;
2643 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002644 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002645
Peter Maydell3b643492015-04-26 16:49:23 +01002646 return result;
bellard13eb76e2004-01-24 15:23:36 +00002647}
bellard8df1cd02005-01-28 22:37:22 +00002648
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002649MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2650 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002651{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002652 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002653 hwaddr addr1;
2654 MemoryRegion *mr;
2655 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002656
2657 if (len > 0) {
2658 rcu_read_lock();
2659 l = len;
2660 mr = address_space_translate(as, addr, &addr1, &l, true);
2661 result = address_space_write_continue(as, addr, attrs, buf, len,
2662 addr1, l, mr);
2663 rcu_read_unlock();
2664 }
2665
2666 return result;
2667}
2668
2669/* Called within RCU critical section. */
2670MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2671 MemTxAttrs attrs, uint8_t *buf,
2672 int len, hwaddr addr1, hwaddr l,
2673 MemoryRegion *mr)
2674{
2675 uint8_t *ptr;
2676 uint64_t val;
2677 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002678 bool release_lock = false;
2679
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002680 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002681 if (!memory_access_is_direct(mr, false)) {
2682 /* I/O case */
2683 release_lock |= prepare_mmio_access(mr);
2684 l = memory_access_size(mr, l, addr1);
2685 switch (l) {
2686 case 8:
2687 /* 64 bit read access */
2688 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2689 attrs);
2690 stq_p(buf, val);
2691 break;
2692 case 4:
2693 /* 32 bit read access */
2694 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2695 attrs);
2696 stl_p(buf, val);
2697 break;
2698 case 2:
2699 /* 16 bit read access */
2700 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2701 attrs);
2702 stw_p(buf, val);
2703 break;
2704 case 1:
2705 /* 8 bit read access */
2706 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2707 attrs);
2708 stb_p(buf, val);
2709 break;
2710 default:
2711 abort();
2712 }
2713 } else {
2714 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002715 ptr = qemu_get_ram_ptr(mr->ram_block,
2716 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002717 memcpy(buf, ptr, l);
2718 }
2719
2720 if (release_lock) {
2721 qemu_mutex_unlock_iothread();
2722 release_lock = false;
2723 }
2724
2725 len -= l;
2726 buf += l;
2727 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002728
2729 if (!len) {
2730 break;
2731 }
2732
2733 l = len;
2734 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002735 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002736
2737 return result;
2738}
2739
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002740MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2741 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002742{
2743 hwaddr l;
2744 hwaddr addr1;
2745 MemoryRegion *mr;
2746 MemTxResult result = MEMTX_OK;
2747
2748 if (len > 0) {
2749 rcu_read_lock();
2750 l = len;
2751 mr = address_space_translate(as, addr, &addr1, &l, false);
2752 result = address_space_read_continue(as, addr, attrs, buf, len,
2753 addr1, l, mr);
2754 rcu_read_unlock();
2755 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002756
2757 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002758}
2759
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002760MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2761 uint8_t *buf, int len, bool is_write)
2762{
2763 if (is_write) {
2764 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2765 } else {
2766 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2767 }
2768}
Avi Kivityac1970f2012-10-03 16:22:53 +02002769
Avi Kivitya8170e52012-10-23 12:30:10 +02002770void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002771 int len, int is_write)
2772{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002773 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2774 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002775}
2776
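As a usage sketch (the device name, descriptor layout and addresses are invented, and the usual QEMU headers are assumed in scope), a device model typically drives these entry points with MEMTXATTRS_UNSPECIFIED and checks the combined MemTxResult:

/* Illustrative only: read a request descriptor from guest RAM and write
 * back a little-endian completion word. */
static void mydev_dma_example(AddressSpace *as, hwaddr req_addr,
                              hwaddr comp_addr)
{
    uint8_t desc[16];
    uint32_t status = cpu_to_le32(1);
    MemTxResult res;

    res = address_space_rw(as, req_addr, MEMTXATTRS_UNSPECIFIED,
                           desc, sizeof(desc), false);      /* DMA read */
    res |= address_space_write(as, comp_addr, MEMTXATTRS_UNSPECIFIED,
                               (uint8_t *)&status, sizeof(status));
    if (res != MEMTX_OK) {
        /* A real device would latch an error status or raise an error
         * interrupt here rather than ignore the failed transaction. */
    }
}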
Alexander Graf582b55a2013-12-11 14:17:44 +01002777enum write_rom_type {
2778 WRITE_DATA,
2779 FLUSH_CACHE,
2780};
2781
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002782static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002783 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002784{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002785 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002786 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002787 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002788 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002789
Paolo Bonzini41063e12015-03-18 14:21:43 +01002790 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002791 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002792 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002793 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002794
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002795 if (!(memory_region_is_ram(mr) ||
2796 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002797 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002798 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002799 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002800 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002801 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002802 switch (type) {
2803 case WRITE_DATA:
2804 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002805 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002806 break;
2807 case FLUSH_CACHE:
2808 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2809 break;
2810 }
bellardd0ecd2a2006-04-23 17:14:48 +00002811 }
2812 len -= l;
2813 buf += l;
2814 addr += l;
2815 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002816 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002817}
2818
Alexander Graf582b55a2013-12-11 14:17:44 +01002819/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002820void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002821 const uint8_t *buf, int len)
2822{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002823 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002824}
2825
2826void cpu_flush_icache_range(hwaddr start, int len)
2827{
2828 /*
2829 * This function should do the same thing as an icache flush that was
2830 * triggered from within the guest. For TCG we are always cache coherent,
2831 * so there is no need to flush anything. For KVM / Xen we need to flush
2832 * the host's instruction cache at least.
2833 */
2834 if (tcg_enabled()) {
2835 return;
2836 }
2837
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002838 cpu_physical_memory_write_rom_internal(&address_space_memory,
2839 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002840}
2841
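A sketch of how a board or firmware loader might use the two helpers above; the function and parameter names are placeholders, not QEMU APIs:

/* Illustrative only: copy a firmware blob into ROM/RAM at @base and make
 * sure the host icache is coherent in case the guest executes from it. */
static void load_firmware_example(hwaddr base, const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, base, blob, size);
    cpu_flush_icache_range(base, size);
}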
aliguori6d16c2f2009-01-22 16:59:11 +00002842typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002843 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002844 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002845 hwaddr addr;
2846 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002847 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002848} BounceBuffer;
2849
2850static BounceBuffer bounce;
2851
aliguoriba223c22009-01-22 16:59:16 +00002852typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002853 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002854 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002855} MapClient;
2856
Fam Zheng38e047b2015-03-16 17:03:35 +08002857QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002858static QLIST_HEAD(map_client_list, MapClient) map_client_list
2859 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002860
Fam Zhenge95205e2015-03-16 17:03:37 +08002861static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002862{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002863 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002864 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002865}
2866
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002867static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002868{
2869 MapClient *client;
2870
Blue Swirl72cf2d42009-09-12 07:36:22 +00002871 while (!QLIST_EMPTY(&map_client_list)) {
2872 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002873 qemu_bh_schedule(client->bh);
2874 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002875 }
2876}
2877
Fam Zhenge95205e2015-03-16 17:03:37 +08002878void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002879{
2880 MapClient *client = g_malloc(sizeof(*client));
2881
Fam Zheng38e047b2015-03-16 17:03:35 +08002882 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002883 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002884 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002885 if (!atomic_read(&bounce.in_use)) {
2886 cpu_notify_map_clients_locked();
2887 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002888 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002889}
2890
Fam Zheng38e047b2015-03-16 17:03:35 +08002891void cpu_exec_init_all(void)
2892{
2893 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002894 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002895 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002896 qemu_mutex_init(&map_client_list_lock);
2897}
2898
Fam Zhenge95205e2015-03-16 17:03:37 +08002899void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002900{
Fam Zhenge95205e2015-03-16 17:03:37 +08002901 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002902
Fam Zhenge95205e2015-03-16 17:03:37 +08002903 qemu_mutex_lock(&map_client_list_lock);
2904 QLIST_FOREACH(client, &map_client_list, link) {
2905 if (client->bh == bh) {
2906 cpu_unregister_map_client_do(client);
2907 break;
2908 }
2909 }
2910 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002911}
2912
2913static void cpu_notify_map_clients(void)
2914{
Fam Zheng38e047b2015-03-16 17:03:35 +08002915 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002916 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002917 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002918}
2919
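The map-client list above exists so that a caller which lost the race for the single bounce buffer can be told when to try again. A sketch of the intended pattern, with invented names ("MyDMAState", "retry_dma") and the usual QEMU headers assumed:

/* Illustrative only: attempt a DMA mapping and re-arm via the map-client
 * bottom half when the bounce buffer is busy. */
typedef struct MyDMAState {
    QEMUBH *bh;
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
} MyDMAState;

static void retry_dma(void *opaque)
{
    MyDMAState *s = opaque;
    hwaddr len = s->len;
    void *ptr = address_space_map(s->as, s->addr, &len, true);

    if (!ptr) {
        /* Bounce buffer still in use: run retry_dma again once it frees. */
        cpu_register_map_client(s->bh);
        return;
    }
    /* ... fill ptr[0 .. len) here; a full implementation would loop if
     * len came back smaller than s->len ... */
    address_space_unmap(s->as, ptr, len, true, len);
}

static void start_dma(MyDMAState *s)
{
    if (!s->bh) {
        s->bh = qemu_bh_new(retry_dma, s);
    }
    retry_dma(s);
}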
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002920bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2921{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002922 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002923 hwaddr l, xlat;
2924
Paolo Bonzini41063e12015-03-18 14:21:43 +01002925 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002926 while (len > 0) {
2927 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002928 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2929 if (!memory_access_is_direct(mr, is_write)) {
2930 l = memory_access_size(mr, l, addr);
2931 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002932 return false;
2933 }
2934 }
2935
2936 len -= l;
2937 addr += l;
2938 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002939 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002940 return true;
2941}
2942
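A short sketch of the usual reason to call this (names invented): reject a guest-supplied buffer up front rather than fail halfway through a transfer. The check is advisory only; the memory map can change before the actual access, so callers must still handle MemTx errors.

/* Illustrative only: validate a guest buffer before starting DMA to it. */
static bool mydev_buffer_writeable(AddressSpace *as, hwaddr addr, int len)
{
    return address_space_access_valid(as, addr, len, true /* is_write */);
}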
aliguori6d16c2f2009-01-22 16:59:11 +00002943/* Map a physical memory region into a host virtual address.
2944 * May map a subset of the requested range, given by and returned in *plen.
2945 * May return NULL if resources needed to perform the mapping are exhausted.
2946 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002947 * Use cpu_register_map_client() to know when retrying the map operation is
2948 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002949 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002950void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002951 hwaddr addr,
2952 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002953 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002954{
Avi Kivitya8170e52012-10-23 12:30:10 +02002955 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002956 hwaddr done = 0;
2957 hwaddr l, xlat, base;
2958 MemoryRegion *mr, *this_mr;
2959 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002960 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002961
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002962 if (len == 0) {
2963 return NULL;
2964 }
aliguori6d16c2f2009-01-22 16:59:11 +00002965
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002966 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002967 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002968 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002969
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002970 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002971 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002972 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002973 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002974 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002975 /* Avoid unbounded allocations */
2976 l = MIN(l, TARGET_PAGE_SIZE);
2977 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002978 bounce.addr = addr;
2979 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002980
2981 memory_region_ref(mr);
2982 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002983 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002984 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2985 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002986 }
aliguori6d16c2f2009-01-22 16:59:11 +00002987
Paolo Bonzini41063e12015-03-18 14:21:43 +01002988 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002989 *plen = l;
2990 return bounce.buffer;
2991 }
2992
2993 base = xlat;
2994 raddr = memory_region_get_ram_addr(mr);
2995
2996 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002997 len -= l;
2998 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002999 done += l;
3000 if (len == 0) {
3001 break;
3002 }
3003
3004 l = len;
3005 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3006 if (this_mr != mr || xlat != base + done) {
3007 break;
3008 }
aliguori6d16c2f2009-01-22 16:59:11 +00003009 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003010
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003011 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003012 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003013 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003014 rcu_read_unlock();
3015
3016 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003017}
3018
Avi Kivityac1970f2012-10-03 16:22:53 +02003019/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003020 * Will also mark the memory as dirty if is_write == 1. access_len gives
3021 * the amount of memory that was actually read or written by the caller.
3022 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003023void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3024 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003025{
3026 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003027 MemoryRegion *mr;
3028 ram_addr_t addr1;
3029
3030 mr = qemu_ram_addr_from_host(buffer, &addr1);
3031 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003032 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003033 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003034 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003035 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003036 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003037 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003038 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003039 return;
3040 }
3041 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003042 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3043 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003044 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003045 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003046 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003047 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003048 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003049 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003050}
bellardd0ecd2a2006-04-23 17:14:48 +00003051
Avi Kivitya8170e52012-10-23 12:30:10 +02003052void *cpu_physical_memory_map(hwaddr addr,
3053 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003054 int is_write)
3055{
3056 return address_space_map(&address_space_memory, addr, plen, is_write);
3057}
3058
Avi Kivitya8170e52012-10-23 12:30:10 +02003059void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3060 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003061{
3062 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3063}
3064
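A sketch of the classic map/use/unmap loop built on the two wrappers above (helper name invented; QEMU's MIN macro and string.h are assumed in scope). It falls back to the bounded slow path when the mapping is refused, e.g. while the bounce buffer is busy:

/* Illustrative only: clear a guest-physical range. */
static void zero_guest_range(hwaddr addr, hwaddr len)
{
    static const uint8_t zeroes[512];

    while (len > 0) {
        hwaddr chunk = len;
        void *ptr = cpu_physical_memory_map(addr, &chunk, 1 /* is_write */);

        if (ptr) {
            memset(ptr, 0, chunk);
            cpu_physical_memory_unmap(ptr, chunk, 1, chunk);
        } else {
            /* Mapping unavailable: write at most sizeof(zeroes) bytes the
             * slow way and try to map again on the next iteration. */
            chunk = MIN(chunk, sizeof(zeroes));
            cpu_physical_memory_rw(addr, (uint8_t *)zeroes, chunk, 1);
        }
        addr += chunk;
        len -= chunk;
    }
}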
bellard8df1cd02005-01-28 22:37:22 +00003065/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003066static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3067 MemTxAttrs attrs,
3068 MemTxResult *result,
3069 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003070{
bellard8df1cd02005-01-28 22:37:22 +00003071 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003072 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003073 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003074 hwaddr l = 4;
3075 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003076 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003077 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003078
Paolo Bonzini41063e12015-03-18 14:21:43 +01003079 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003080 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003081 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003082 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003083
bellard8df1cd02005-01-28 22:37:22 +00003084 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003085 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003086#if defined(TARGET_WORDS_BIGENDIAN)
3087 if (endian == DEVICE_LITTLE_ENDIAN) {
3088 val = bswap32(val);
3089 }
3090#else
3091 if (endian == DEVICE_BIG_ENDIAN) {
3092 val = bswap32(val);
3093 }
3094#endif
bellard8df1cd02005-01-28 22:37:22 +00003095 } else {
3096 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003097 ptr = qemu_get_ram_ptr(mr->ram_block,
3098 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003099 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003100 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003101 switch (endian) {
3102 case DEVICE_LITTLE_ENDIAN:
3103 val = ldl_le_p(ptr);
3104 break;
3105 case DEVICE_BIG_ENDIAN:
3106 val = ldl_be_p(ptr);
3107 break;
3108 default:
3109 val = ldl_p(ptr);
3110 break;
3111 }
Peter Maydell50013112015-04-26 16:49:24 +01003112 r = MEMTX_OK;
3113 }
3114 if (result) {
3115 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003116 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003117 if (release_lock) {
3118 qemu_mutex_unlock_iothread();
3119 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003120 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003121 return val;
3122}
3123
Peter Maydell50013112015-04-26 16:49:24 +01003124uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3125 MemTxAttrs attrs, MemTxResult *result)
3126{
3127 return address_space_ldl_internal(as, addr, attrs, result,
3128 DEVICE_NATIVE_ENDIAN);
3129}
3130
3131uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3132 MemTxAttrs attrs, MemTxResult *result)
3133{
3134 return address_space_ldl_internal(as, addr, attrs, result,
3135 DEVICE_LITTLE_ENDIAN);
3136}
3137
3138uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3139 MemTxAttrs attrs, MemTxResult *result)
3140{
3141 return address_space_ldl_internal(as, addr, attrs, result,
3142 DEVICE_BIG_ENDIAN);
3143}
3144
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003145uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003146{
Peter Maydell50013112015-04-26 16:49:24 +01003147 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003148}
3149
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003150uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003151{
Peter Maydell50013112015-04-26 16:49:24 +01003152 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003153}
3154
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003155uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003156{
Peter Maydell50013112015-04-26 16:49:24 +01003157 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003158}
3159
bellard84b7b8e2005-11-28 21:19:04 +00003160/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003161static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3162 MemTxAttrs attrs,
3163 MemTxResult *result,
3164 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003165{
bellard84b7b8e2005-11-28 21:19:04 +00003166 uint8_t *ptr;
3167 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003168 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003169 hwaddr l = 8;
3170 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003171 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003172 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003173
Paolo Bonzini41063e12015-03-18 14:21:43 +01003174 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003175 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003176 false);
3177 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003178 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003179
bellard84b7b8e2005-11-28 21:19:04 +00003180 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003181 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003182#if defined(TARGET_WORDS_BIGENDIAN)
3183 if (endian == DEVICE_LITTLE_ENDIAN) {
3184 val = bswap64(val);
3185 }
3186#else
3187 if (endian == DEVICE_BIG_ENDIAN) {
3188 val = bswap64(val);
3189 }
3190#endif
bellard84b7b8e2005-11-28 21:19:04 +00003191 } else {
3192 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003193 ptr = qemu_get_ram_ptr(mr->ram_block,
3194 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003195 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003196 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003197 switch (endian) {
3198 case DEVICE_LITTLE_ENDIAN:
3199 val = ldq_le_p(ptr);
3200 break;
3201 case DEVICE_BIG_ENDIAN:
3202 val = ldq_be_p(ptr);
3203 break;
3204 default:
3205 val = ldq_p(ptr);
3206 break;
3207 }
Peter Maydell50013112015-04-26 16:49:24 +01003208 r = MEMTX_OK;
3209 }
3210 if (result) {
3211 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003212 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003213 if (release_lock) {
3214 qemu_mutex_unlock_iothread();
3215 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003216 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003217 return val;
3218}
3219
Peter Maydell50013112015-04-26 16:49:24 +01003220uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3221 MemTxAttrs attrs, MemTxResult *result)
3222{
3223 return address_space_ldq_internal(as, addr, attrs, result,
3224 DEVICE_NATIVE_ENDIAN);
3225}
3226
3227uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3228 MemTxAttrs attrs, MemTxResult *result)
3229{
3230 return address_space_ldq_internal(as, addr, attrs, result,
3231 DEVICE_LITTLE_ENDIAN);
3232}
3233
3234uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3235 MemTxAttrs attrs, MemTxResult *result)
3236{
3237 return address_space_ldq_internal(as, addr, attrs, result,
3238 DEVICE_BIG_ENDIAN);
3239}
3240
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003241uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003242{
Peter Maydell50013112015-04-26 16:49:24 +01003243 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003244}
3245
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003246uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003247{
Peter Maydell50013112015-04-26 16:49:24 +01003248 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003249}
3250
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003251uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003252{
Peter Maydell50013112015-04-26 16:49:24 +01003253 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003254}
3255
bellardaab33092005-10-30 20:48:42 +00003256/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003257uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3258 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003259{
3260 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003261 MemTxResult r;
3262
3263 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3264 if (result) {
3265 *result = r;
3266 }
bellardaab33092005-10-30 20:48:42 +00003267 return val;
3268}
3269
Peter Maydell50013112015-04-26 16:49:24 +01003270uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3271{
3272 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3273}
3274
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003275/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003276static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3277 hwaddr addr,
3278 MemTxAttrs attrs,
3279 MemTxResult *result,
3280 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003281{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003282 uint8_t *ptr;
3283 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003284 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003285 hwaddr l = 2;
3286 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003287 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003288 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003289
Paolo Bonzini41063e12015-03-18 14:21:43 +01003290 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003291 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003292 false);
3293 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003294 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003295
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003296 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003297 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003298#if defined(TARGET_WORDS_BIGENDIAN)
3299 if (endian == DEVICE_LITTLE_ENDIAN) {
3300 val = bswap16(val);
3301 }
3302#else
3303 if (endian == DEVICE_BIG_ENDIAN) {
3304 val = bswap16(val);
3305 }
3306#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003307 } else {
3308 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003309 ptr = qemu_get_ram_ptr(mr->ram_block,
3310 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003311 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003312 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313 switch (endian) {
3314 case DEVICE_LITTLE_ENDIAN:
3315 val = lduw_le_p(ptr);
3316 break;
3317 case DEVICE_BIG_ENDIAN:
3318 val = lduw_be_p(ptr);
3319 break;
3320 default:
3321 val = lduw_p(ptr);
3322 break;
3323 }
Peter Maydell50013112015-04-26 16:49:24 +01003324 r = MEMTX_OK;
3325 }
3326 if (result) {
3327 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003328 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003329 if (release_lock) {
3330 qemu_mutex_unlock_iothread();
3331 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003332 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003333 return val;
bellardaab33092005-10-30 20:48:42 +00003334}
3335
Peter Maydell50013112015-04-26 16:49:24 +01003336uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3337 MemTxAttrs attrs, MemTxResult *result)
3338{
3339 return address_space_lduw_internal(as, addr, attrs, result,
3340 DEVICE_NATIVE_ENDIAN);
3341}
3342
3343uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3344 MemTxAttrs attrs, MemTxResult *result)
3345{
3346 return address_space_lduw_internal(as, addr, attrs, result,
3347 DEVICE_LITTLE_ENDIAN);
3348}
3349
3350uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3351 MemTxAttrs attrs, MemTxResult *result)
3352{
3353 return address_space_lduw_internal(as, addr, attrs, result,
3354 DEVICE_BIG_ENDIAN);
3355}
3356
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003357uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358{
Peter Maydell50013112015-04-26 16:49:24 +01003359 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003360}
3361
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003362uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363{
Peter Maydell50013112015-04-26 16:49:24 +01003364 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003365}
3366
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003367uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368{
Peter Maydell50013112015-04-26 16:49:24 +01003369 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370}
3371
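These *_phys wrappers are conveniences that drop the MemTxResult (they pass NULL); use the address_space_ld* forms when the caller needs to observe bus errors. A minimal sketch of reading a little-endian descriptor, with the layout and names invented for the example:

/* Illustrative only: fetch a 64-bit buffer address and a 32-bit length
 * from a guest descriptor at @desc_pa. */
static void read_desc_example(AddressSpace *as, hwaddr desc_pa,
                              uint64_t *buf_addr, uint32_t *buf_len)
{
    *buf_addr = ldq_le_phys(as, desc_pa);       /* offset 0: address */
    *buf_len  = ldl_le_phys(as, desc_pa + 8);   /* offset 8: length  */
}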
bellard8df1cd02005-01-28 22:37:22 +00003372/* warning: addr must be aligned. The RAM page is not marked as dirty
3373 and the code inside is not invalidated. This is useful if the dirty
3374 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003375void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3376 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003377{
bellard8df1cd02005-01-28 22:37:22 +00003378 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003379 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003380 hwaddr l = 4;
3381 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003382 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003383 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003384 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003385
Paolo Bonzini41063e12015-03-18 14:21:43 +01003386 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003387 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003388 true);
3389 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003390 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003391
Peter Maydell50013112015-04-26 16:49:24 +01003392 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003393 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003394 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003395 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003396 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003397
Paolo Bonzini845b6212015-03-23 11:45:53 +01003398 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3399 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003400 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003401 r = MEMTX_OK;
3402 }
3403 if (result) {
3404 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003405 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003406 if (release_lock) {
3407 qemu_mutex_unlock_iothread();
3408 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003409 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003410}
3411
Peter Maydell50013112015-04-26 16:49:24 +01003412void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3413{
3414 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3415}
3416
bellard8df1cd02005-01-28 22:37:22 +00003417/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003418static inline void address_space_stl_internal(AddressSpace *as,
3419 hwaddr addr, uint32_t val,
3420 MemTxAttrs attrs,
3421 MemTxResult *result,
3422 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003423{
bellard8df1cd02005-01-28 22:37:22 +00003424 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003425 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003426 hwaddr l = 4;
3427 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003428 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003429 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003430
Paolo Bonzini41063e12015-03-18 14:21:43 +01003431 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003432 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003433 true);
3434 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003435 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003436
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003437#if defined(TARGET_WORDS_BIGENDIAN)
3438 if (endian == DEVICE_LITTLE_ENDIAN) {
3439 val = bswap32(val);
3440 }
3441#else
3442 if (endian == DEVICE_BIG_ENDIAN) {
3443 val = bswap32(val);
3444 }
3445#endif
Peter Maydell50013112015-04-26 16:49:24 +01003446 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003447 } else {
bellard8df1cd02005-01-28 22:37:22 +00003448 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003449 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003450 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003451 switch (endian) {
3452 case DEVICE_LITTLE_ENDIAN:
3453 stl_le_p(ptr, val);
3454 break;
3455 case DEVICE_BIG_ENDIAN:
3456 stl_be_p(ptr, val);
3457 break;
3458 default:
3459 stl_p(ptr, val);
3460 break;
3461 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003462 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003463 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003464 }
Peter Maydell50013112015-04-26 16:49:24 +01003465 if (result) {
3466 *result = r;
3467 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003468 if (release_lock) {
3469 qemu_mutex_unlock_iothread();
3470 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003471 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003472}
3473
3474void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3475 MemTxAttrs attrs, MemTxResult *result)
3476{
3477 address_space_stl_internal(as, addr, val, attrs, result,
3478 DEVICE_NATIVE_ENDIAN);
3479}
3480
3481void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3482 MemTxAttrs attrs, MemTxResult *result)
3483{
3484 address_space_stl_internal(as, addr, val, attrs, result,
3485 DEVICE_LITTLE_ENDIAN);
3486}
3487
3488void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3489 MemTxAttrs attrs, MemTxResult *result)
3490{
3491 address_space_stl_internal(as, addr, val, attrs, result,
3492 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003493}
3494
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003495void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003496{
Peter Maydell50013112015-04-26 16:49:24 +01003497 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003498}
3499
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003500void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003501{
Peter Maydell50013112015-04-26 16:49:24 +01003502 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003503}
3504
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003505void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003506{
Peter Maydell50013112015-04-26 16:49:24 +01003507 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003508}
3509
bellardaab33092005-10-30 20:48:42 +00003510/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003511void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3512 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003513{
3514 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003515 MemTxResult r;
3516
3517 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3518 if (result) {
3519 *result = r;
3520 }
3521}
3522
3523void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3524{
3525 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003526}
3527
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003528/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003529static inline void address_space_stw_internal(AddressSpace *as,
3530 hwaddr addr, uint32_t val,
3531 MemTxAttrs attrs,
3532 MemTxResult *result,
3533 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003534{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003535 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003536 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003537 hwaddr l = 2;
3538 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003539 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003540 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003541
Paolo Bonzini41063e12015-03-18 14:21:43 +01003542 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003543 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003544 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003545 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003546
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003547#if defined(TARGET_WORDS_BIGENDIAN)
3548 if (endian == DEVICE_LITTLE_ENDIAN) {
3549 val = bswap16(val);
3550 }
3551#else
3552 if (endian == DEVICE_BIG_ENDIAN) {
3553 val = bswap16(val);
3554 }
3555#endif
Peter Maydell50013112015-04-26 16:49:24 +01003556 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003557 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003558 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003559 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003560 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003561 switch (endian) {
3562 case DEVICE_LITTLE_ENDIAN:
3563 stw_le_p(ptr, val);
3564 break;
3565 case DEVICE_BIG_ENDIAN:
3566 stw_be_p(ptr, val);
3567 break;
3568 default:
3569 stw_p(ptr, val);
3570 break;
3571 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003572 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003573 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003574 }
Peter Maydell50013112015-04-26 16:49:24 +01003575 if (result) {
3576 *result = r;
3577 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003578 if (release_lock) {
3579 qemu_mutex_unlock_iothread();
3580 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003581 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003582}
3583
3584void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3585 MemTxAttrs attrs, MemTxResult *result)
3586{
3587 address_space_stw_internal(as, addr, val, attrs, result,
3588 DEVICE_NATIVE_ENDIAN);
3589}
3590
3591void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3592 MemTxAttrs attrs, MemTxResult *result)
3593{
3594 address_space_stw_internal(as, addr, val, attrs, result,
3595 DEVICE_LITTLE_ENDIAN);
3596}
3597
3598void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3599 MemTxAttrs attrs, MemTxResult *result)
3600{
3601 address_space_stw_internal(as, addr, val, attrs, result,
3602 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003603}
3604
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003605void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003606{
Peter Maydell50013112015-04-26 16:49:24 +01003607 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003608}
3609
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003610void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003611{
Peter Maydell50013112015-04-26 16:49:24 +01003612 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003613}
3614
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003615void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003616{
Peter Maydell50013112015-04-26 16:49:24 +01003617 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003618}
3619
bellardaab33092005-10-30 20:48:42 +00003620/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003621void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3622 MemTxAttrs attrs, MemTxResult *result)
3623{
3624 MemTxResult r;
3625 val = tswap64(val);
3626 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3627 if (result) {
3628 *result = r;
3629 }
3630}
3631
3632void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3633 MemTxAttrs attrs, MemTxResult *result)
3634{
3635 MemTxResult r;
3636 val = cpu_to_le64(val);
3637 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3638 if (result) {
3639 *result = r;
3640 }
3641}

3642void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3643 MemTxAttrs attrs, MemTxResult *result)
3644{
3645 MemTxResult r;
3646 val = cpu_to_be64(val);
3647 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3648 if (result) {
3649 *result = r;
3650 }
3651}
3652
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003653void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003654{
Peter Maydell50013112015-04-26 16:49:24 +01003655 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003656}
3657
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003658void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003659{
Peter Maydell50013112015-04-26 16:49:24 +01003660 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003661}
3662
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003663void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003664{
Peter Maydell50013112015-04-26 16:49:24 +01003665 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003666}
3667
aliguori5e2972f2009-03-28 17:51:36 +00003668/* virtual memory access for debug (includes writing to ROM) */
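/*
 * Example (sketch): a debugger stub reading guest memory, assuming "cpu" is
 * the CPUState being inspected and "vaddr" is a guest virtual address:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... no physical page is mapped at vaddr ...
 *     }
 *
 * The final argument selects the direction: 0 reads, non-zero writes.
 * Writes go through cpu_physical_memory_write_rom(), so ROM regions can be
 * patched for debug purposes.
 */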
Andreas Färberf17ec442013-06-29 19:40:58 +02003669int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003670 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003671{
3672 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003673 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003674 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003675
3676 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003677 int asidx;
3678 MemTxAttrs attrs;
3679
bellard13eb76e2004-01-24 15:23:36 +00003680 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003681 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3682 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003683 /* if no physical page mapped, return an error */
3684 if (phys_addr == -1)
3685 return -1;
3686 l = (page + TARGET_PAGE_SIZE) - addr;
3687 if (l > len)
3688 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003689 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003690 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003691 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3692 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003693 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003694 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3695 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003696 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003697 }
bellard13eb76e2004-01-24 15:23:36 +00003698 len -= l;
3699 buf += l;
3700 addr += l;
3701 }
3702 return 0;
3703}
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003704
3705/*
3706 * Allows code that needs to deal with migration bitmaps etc. to still be
3707 * built target-independent.
3708 */
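/*
 * For example, target-independent code can recover the target page size with
 * (sketch):
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 */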
3709size_t qemu_target_page_bits(void)
3710{
3711 return TARGET_PAGE_BITS;
3712}
3713
Paul Brooka68fe892010-03-01 00:08:59 +00003714#endif
bellard13eb76e2004-01-24 15:23:36 +00003715
Blue Swirl8e4a4242013-01-06 18:30:17 +00003716/*
3717 * A helper function for the _utterly broken_ virtio device model to find out if
3718 * it's running on a big endian machine. Don't do this at home kids!
3719 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003720bool target_words_bigendian(void);
3721bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003722{
3723#if defined(TARGET_WORDS_BIGENDIAN)
3724 return true;
3725#else
3726 return false;
3727#endif
3728}
3729
Wen Congyang76f35532012-05-07 12:04:18 +08003730#ifndef CONFIG_USER_ONLY
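/*
 * Returns true if the given physical address is backed by MMIO rather than
 * by RAM or a ROM device region.  The lookup is performed under the RCU read
 * lock so the memory map cannot change underneath it.
 */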
Avi Kivitya8170e52012-10-23 12:30:10 +02003731bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003732{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003733 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003734 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003735 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003736
Paolo Bonzini41063e12015-03-18 14:21:43 +01003737 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003738 mr = address_space_translate(&address_space_memory,
3739 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003740
Paolo Bonzini41063e12015-03-18 14:21:43 +01003741 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3742 rcu_read_unlock();
3743 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003744}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003745
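/*
 * Walks every RAMBlock under the RCU read lock and calls func() with the
 * block's idstr, host pointer, guest offset and used length.  Iteration
 * stops at the first non-zero return value, which is passed back to the
 * caller.
 */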
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003746int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003747{
3748 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003749 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003750
Mike Day0dc3f442013-09-05 14:41:35 -04003751 rcu_read_lock();
3752 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003753 ret = func(block->idstr, block->host, block->offset,
3754 block->used_length, opaque);
3755 if (ret) {
3756 break;
3757 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003758 }
Mike Day0dc3f442013-09-05 14:41:35 -04003759 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003760 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003761}
Peter Maydellec3f8c92013-06-27 20:53:38 +01003762#endif