blob: 60cf46a5b51cf9b8127d0b1747349be52eddde7d [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
Blue Swirl5b6dd862012-12-02 16:04:43 +00002 * Virtual page mapping
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
Peter Maydell7b31bbc2016-01-26 18:16:56 +000019#include "qemu/osdep.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010020#include "qapi/error.h"
Stefan Weil777872e2014-02-23 18:02:08 +010021#ifndef _WIN32
bellardd5a8f072004-09-29 21:15:28 +000022#endif
bellard54936002003-05-13 00:25:15 +000023
Veronia Bahaaf348b6d2016-03-20 19:16:19 +020024#include "qemu/cutils.h"
bellard6180a182003-09-30 21:04:53 +000025#include "cpu.h"
Paolo Bonzini63c91552016-03-15 13:18:37 +010026#include "exec/exec-all.h"
bellardb67d9a52008-05-23 09:57:34 +000027#include "tcg.h"
Paolo Bonzini741da0d2014-06-27 08:40:04 +020028#include "hw/qdev-core.h"
Michael S. Tsirkin4485bd22015-03-11 07:56:34 +010029#if !defined(CONFIG_USER_ONLY)
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +020030#include "hw/boards.h"
Paolo Bonzini33c11872016-03-15 16:58:45 +010031#include "hw/xen/xen.h"
Michael S. Tsirkin4485bd22015-03-11 07:56:34 +010032#endif
Paolo Bonzini9c17d612012-12-17 18:20:04 +010033#include "sysemu/kvm.h"
Markus Armbruster2ff3de62013-07-04 15:09:22 +020034#include "sysemu/sysemu.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010035#include "qemu/timer.h"
36#include "qemu/config-file.h"
Andreas Färber75a34032013-09-02 16:57:02 +020037#include "qemu/error-report.h"
pbrook53a59602006-03-25 19:31:22 +000038#if defined(CONFIG_USER_ONLY)
Markus Armbrustera9c94272016-06-22 19:11:19 +020039#include "qemu.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010040#else /* !CONFIG_USER_ONLY */
Paolo Bonzini741da0d2014-06-27 08:40:04 +020041#include "hw/hw.h"
42#include "exec/memory.h"
Paolo Bonzinidf43d492016-03-16 10:24:54 +010043#include "exec/ioport.h"
Paolo Bonzini741da0d2014-06-27 08:40:04 +020044#include "sysemu/dma.h"
45#include "exec/address-spaces.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010046#include "sysemu/xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010047#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000048#endif
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +010049#include "exec/cpu-all.h"
Mike Day0dc3f442013-09-05 14:41:35 -040050#include "qemu/rcu_queue.h"
Jan Kiszka4840f102015-06-18 18:47:22 +020051#include "qemu/main-loop.h"
Blue Swirl5b6dd862012-12-02 16:04:43 +000052#include "translate-all.h"
Pavel Dovgalyuk76159362015-09-17 19:25:07 +030053#include "sysemu/replay.h"
Blue Swirl0cac1b62012-04-09 16:50:52 +000054
Paolo Bonzini022c62c2012-12-17 18:19:49 +010055#include "exec/memory-internal.h"
Juan Quintela220c3eb2013-10-14 17:13:59 +020056#include "exec/ram_addr.h"
Paolo Bonzini508127e2016-01-07 16:55:28 +030057#include "exec/log.h"
Avi Kivity67d95c12011-12-15 15:25:22 +020058
Bharata B Rao9dfeca72016-05-12 09:18:12 +053059#include "migration/vmstate.h"
60
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +020061#include "qemu/range.h"
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +030062#ifndef _WIN32
63#include "qemu/mmap-alloc.h"
64#endif
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +020065
blueswir1db7b5422007-05-26 17:36:03 +000066//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000067
pbrook99773bd2006-04-16 15:14:59 +000068#if !defined(CONFIG_USER_ONLY)
Mike Day0dc3f442013-09-05 14:41:35 -040069/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
70 * are protected by the ramlist lock.
71 */
Mike Day0d53d9f2015-01-21 13:45:24 +010072RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
Avi Kivity62152b82011-07-26 14:26:14 +030073
74static MemoryRegion *system_memory;
Avi Kivity309cb472011-08-08 16:09:03 +030075static MemoryRegion *system_io;
Avi Kivity62152b82011-07-26 14:26:14 +030076
Avi Kivityf6790af2012-10-02 20:13:51 +020077AddressSpace address_space_io;
78AddressSpace address_space_memory;
Avi Kivity2673a5d2012-10-02 18:49:28 +020079
Paolo Bonzini0844e002013-05-24 14:37:28 +020080MemoryRegion io_mem_rom, io_mem_notdirty;
Jan Kiszkaacc9d802013-05-26 21:55:37 +020081static MemoryRegion io_mem_unassigned;
Avi Kivity0e0df1e2012-01-02 00:32:15 +020082
Paolo Bonzini7bd4f432014-05-14 17:43:22 +080083/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
84#define RAM_PREALLOC (1 << 0)
85
Paolo Bonzinidbcb8982014-06-10 19:15:24 +080086/* RAM is mmap-ed with MAP_SHARED */
87#define RAM_SHARED (1 << 1)
88
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +020089/* Only a portion of RAM (used_length) is actually used, and migrated.
90 * This used_length size can change across reboots.
91 */
92#define RAM_RESIZEABLE (1 << 2)
93
pbrooke2eef172008-06-08 01:09:01 +000094#endif
bellard9fa3e852004-01-04 18:06:42 +000095
Andreas Färberbdc44642013-06-24 23:50:24 +020096struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
bellard6a00d602005-11-21 23:25:50 +000097/* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
Paolo Bonzinif240eb62015-08-26 00:17:58 +020099__thread CPUState *current_cpu;
pbrook2e70f6e2008-06-29 01:03:05 +0000100/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000101 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000102 2 = Adaptive rate instruction counting. */
Paolo Bonzini5708fc62012-11-26 15:36:40 +0100103int use_icount;
bellard6a00d602005-11-21 23:25:50 +0000104
pbrooke2eef172008-06-08 01:09:01 +0000105#if !defined(CONFIG_USER_ONLY)
Avi Kivity4346ae32012-02-10 17:00:01 +0200106
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200107typedef struct PhysPageEntry PhysPageEntry;
108
109struct PhysPageEntry {
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200110 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200111 uint32_t skip : 6;
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200112 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200113 uint32_t ptr : 26;
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200114};
115
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200116#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
117
Paolo Bonzini03f49952013-11-07 17:14:36 +0100118/* Size of the L2 (and L3, etc) page tables. */
Paolo Bonzini57271d62013-11-07 17:14:37 +0100119#define ADDR_SPACE_BITS 64
Paolo Bonzini03f49952013-11-07 17:14:36 +0100120
Michael S. Tsirkin026736c2013-11-13 20:13:03 +0200121#define P_L2_BITS 9
Paolo Bonzini03f49952013-11-07 17:14:36 +0100122#define P_L2_SIZE (1 << P_L2_BITS)
123
124#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
125
126typedef PhysPageEntry Node[P_L2_SIZE];
Paolo Bonzini0475d942013-05-29 12:28:21 +0200127
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200128typedef struct PhysPageMap {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100129 struct rcu_head rcu;
130
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200131 unsigned sections_nb;
132 unsigned sections_nb_alloc;
133 unsigned nodes_nb;
134 unsigned nodes_nb_alloc;
135 Node *nodes;
136 MemoryRegionSection *sections;
137} PhysPageMap;
138
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200139struct AddressSpaceDispatch {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100140 struct rcu_head rcu;
141
Fam Zheng729633c2016-03-01 14:18:24 +0800142 MemoryRegionSection *mru_section;
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200143 /* This is a multi-level map on the physical address space.
144 * The bottom level has pointers to MemoryRegionSections.
145 */
146 PhysPageEntry phys_map;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200147 PhysPageMap map;
Jan Kiszkaacc9d802013-05-26 21:55:37 +0200148 AddressSpace *as;
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200149};
150
Jan Kiszka90260c62013-05-26 21:46:51 +0200151#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
152typedef struct subpage_t {
153 MemoryRegion iomem;
Jan Kiszkaacc9d802013-05-26 21:55:37 +0200154 AddressSpace *as;
Jan Kiszka90260c62013-05-26 21:46:51 +0200155 hwaddr base;
156 uint16_t sub_section[TARGET_PAGE_SIZE];
157} subpage_t;
158
Liu Ping Fanb41aac42013-05-29 11:09:17 +0200159#define PHYS_SECTION_UNASSIGNED 0
160#define PHYS_SECTION_NOTDIRTY 1
161#define PHYS_SECTION_ROM 2
162#define PHYS_SECTION_WATCH 3
Avi Kivity5312bd82012-02-12 18:32:55 +0200163
pbrooke2eef172008-06-08 01:09:01 +0000164static void io_mem_init(void);
Avi Kivity62152b82011-07-26 14:26:14 +0300165static void memory_map_init(void);
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000166static void tcg_commit(MemoryListener *listener);
pbrooke2eef172008-06-08 01:09:01 +0000167
Avi Kivity1ec9b902012-01-02 12:47:48 +0200168static MemoryRegion io_mem_watch;
Peter Maydell32857f42015-10-01 15:29:50 +0100169
170/**
171 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
172 * @cpu: the CPU whose AddressSpace this is
173 * @as: the AddressSpace itself
174 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
175 * @tcg_as_listener: listener for tracking changes to the AddressSpace
176 */
177struct CPUAddressSpace {
178 CPUState *cpu;
179 AddressSpace *as;
180 struct AddressSpaceDispatch *memory_dispatch;
181 MemoryListener tcg_as_listener;
182};
183
pbrook6658ffb2007-03-16 23:58:11 +0000184#endif
bellard54936002003-05-13 00:25:15 +0000185
Paul Brook6d9a1302010-02-28 23:55:53 +0000186#if !defined(CONFIG_USER_ONLY)
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200187
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200188static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
Avi Kivityf7bf5462012-02-13 20:12:05 +0200189{
Peter Lieven101420b2016-07-15 12:03:50 +0200190 static unsigned alloc_hint = 16;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200191 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
Peter Lieven101420b2016-07-15 12:03:50 +0200192 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200193 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
194 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
Peter Lieven101420b2016-07-15 12:03:50 +0200195 alloc_hint = map->nodes_nb_alloc;
Avi Kivityf7bf5462012-02-13 20:12:05 +0200196 }
197}
198
Paolo Bonzinidb946042015-05-21 15:12:29 +0200199static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200200{
201 unsigned i;
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200202 uint32_t ret;
Paolo Bonzinidb946042015-05-21 15:12:29 +0200203 PhysPageEntry e;
204 PhysPageEntry *p;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200205
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200206 ret = map->nodes_nb++;
Paolo Bonzinidb946042015-05-21 15:12:29 +0200207 p = map->nodes[ret];
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200208 assert(ret != PHYS_MAP_NODE_NIL);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200209 assert(ret != map->nodes_nb_alloc);
Paolo Bonzinidb946042015-05-21 15:12:29 +0200210
211 e.skip = leaf ? 0 : 1;
212 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
Paolo Bonzini03f49952013-11-07 17:14:36 +0100213 for (i = 0; i < P_L2_SIZE; ++i) {
Paolo Bonzinidb946042015-05-21 15:12:29 +0200214 memcpy(&p[i], &e, sizeof(e));
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200215 }
Avi Kivityf7bf5462012-02-13 20:12:05 +0200216 return ret;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200217}
218
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200219static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
220 hwaddr *index, hwaddr *nb, uint16_t leaf,
Avi Kivity29990972012-02-13 20:21:20 +0200221 int level)
Avi Kivityf7bf5462012-02-13 20:12:05 +0200222{
223 PhysPageEntry *p;
Paolo Bonzini03f49952013-11-07 17:14:36 +0100224 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200225
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200226 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
Paolo Bonzinidb946042015-05-21 15:12:29 +0200227 lp->ptr = phys_map_node_alloc(map, level == 0);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200228 }
Paolo Bonzinidb946042015-05-21 15:12:29 +0200229 p = map->nodes[lp->ptr];
Paolo Bonzini03f49952013-11-07 17:14:36 +0100230 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
Avi Kivityf7bf5462012-02-13 20:12:05 +0200231
Paolo Bonzini03f49952013-11-07 17:14:36 +0100232 while (*nb && lp < &p[P_L2_SIZE]) {
Avi Kivity07f07b32012-02-13 20:45:32 +0200233 if ((*index & (step - 1)) == 0 && *nb >= step) {
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200234 lp->skip = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +0200235 lp->ptr = leaf;
Avi Kivity07f07b32012-02-13 20:45:32 +0200236 *index += step;
237 *nb -= step;
Avi Kivity29990972012-02-13 20:21:20 +0200238 } else {
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200239 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
Avi Kivity29990972012-02-13 20:21:20 +0200240 }
241 ++lp;
Avi Kivityf7bf5462012-02-13 20:12:05 +0200242 }
243}
244
Avi Kivityac1970f2012-10-03 16:22:53 +0200245static void phys_page_set(AddressSpaceDispatch *d,
Avi Kivitya8170e52012-10-23 12:30:10 +0200246 hwaddr index, hwaddr nb,
Avi Kivity29990972012-02-13 20:21:20 +0200247 uint16_t leaf)
bellard92e873b2004-05-21 14:52:29 +0000248{
Avi Kivity29990972012-02-13 20:21:20 +0200249 /* Wildly overreserve - it doesn't matter much. */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200250 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
bellard92e873b2004-05-21 14:52:29 +0000251
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200252 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
bellard92e873b2004-05-21 14:52:29 +0000253}
254
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200255/* Compact a non leaf page entry. Simply detect that the entry has a single child,
256 * and update our entry so we can skip it and go directly to the destination.
257 */
258static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
259{
260 unsigned valid_ptr = P_L2_SIZE;
261 int valid = 0;
262 PhysPageEntry *p;
263 int i;
264
265 if (lp->ptr == PHYS_MAP_NODE_NIL) {
266 return;
267 }
268
269 p = nodes[lp->ptr];
270 for (i = 0; i < P_L2_SIZE; i++) {
271 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
272 continue;
273 }
274
275 valid_ptr = i;
276 valid++;
277 if (p[i].skip) {
278 phys_page_compact(&p[i], nodes, compacted);
279 }
280 }
281
282 /* We can only compress if there's only one child. */
283 if (valid != 1) {
284 return;
285 }
286
287 assert(valid_ptr < P_L2_SIZE);
288
289 /* Don't compress if it won't fit in the # of bits we have. */
290 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
291 return;
292 }
293
294 lp->ptr = p[valid_ptr].ptr;
295 if (!p[valid_ptr].skip) {
296 /* If our only child is a leaf, make this a leaf. */
297 /* By design, we should have made this node a leaf to begin with so we
298 * should never reach here.
299 * But since it's so simple to handle this, let's do it just in case we
300 * change this rule.
301 */
302 lp->skip = 0;
303 } else {
304 lp->skip += p[valid_ptr].skip;
305 }
306}
307
308static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
309{
310 DECLARE_BITMAP(compacted, nodes_nb);
311
312 if (d->phys_map.skip) {
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200313 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200314 }
315}
316
Fam Zheng29cb5332016-03-01 14:18:23 +0800317static inline bool section_covers_addr(const MemoryRegionSection *section,
318 hwaddr addr)
319{
320 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
321 * the section must cover the entire address space.
322 */
323 return section->size.hi ||
324 range_covers_byte(section->offset_within_address_space,
325 section->size.lo, addr);
326}
327
Michael S. Tsirkin97115a82013-11-13 20:08:19 +0200328static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200329 Node *nodes, MemoryRegionSection *sections)
bellard92e873b2004-05-21 14:52:29 +0000330{
Avi Kivity31ab2b42012-02-13 16:44:19 +0200331 PhysPageEntry *p;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +0200332 hwaddr index = addr >> TARGET_PAGE_BITS;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200333 int i;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200334
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200335 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
Avi Kivityc19e8802012-02-13 20:25:31 +0200336 if (lp.ptr == PHYS_MAP_NODE_NIL) {
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200337 return &sections[PHYS_SECTION_UNASSIGNED];
Avi Kivity31ab2b42012-02-13 16:44:19 +0200338 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200339 p = nodes[lp.ptr];
Paolo Bonzini03f49952013-11-07 17:14:36 +0100340 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200341 }
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200342
Fam Zheng29cb5332016-03-01 14:18:23 +0800343 if (section_covers_addr(&sections[lp.ptr], addr)) {
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200344 return &sections[lp.ptr];
345 } else {
346 return &sections[PHYS_SECTION_UNASSIGNED];
347 }
Avi Kivityf3705d52012-03-08 16:16:34 +0200348}
349
Blue Swirle5548612012-04-21 13:08:33 +0000350bool memory_region_is_unassigned(MemoryRegion *mr)
351{
Paolo Bonzini2a8e7492013-05-24 14:34:08 +0200352 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
Blue Swirle5548612012-04-21 13:08:33 +0000353 && mr != &io_mem_watch;
354}
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200355
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100356/* Called from RCU critical section */
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200357static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
Jan Kiszka90260c62013-05-26 21:46:51 +0200358 hwaddr addr,
359 bool resolve_subpage)
Jan Kiszka9f029602013-05-06 16:48:02 +0200360{
Fam Zheng729633c2016-03-01 14:18:24 +0800361 MemoryRegionSection *section = atomic_read(&d->mru_section);
Jan Kiszka90260c62013-05-26 21:46:51 +0200362 subpage_t *subpage;
Fam Zheng729633c2016-03-01 14:18:24 +0800363 bool update;
Jan Kiszka90260c62013-05-26 21:46:51 +0200364
Fam Zheng729633c2016-03-01 14:18:24 +0800365 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
366 section_covers_addr(section, addr)) {
367 update = false;
368 } else {
369 section = phys_page_find(d->phys_map, addr, d->map.nodes,
370 d->map.sections);
371 update = true;
372 }
Jan Kiszka90260c62013-05-26 21:46:51 +0200373 if (resolve_subpage && section->mr->subpage) {
374 subpage = container_of(section->mr, subpage_t, iomem);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200375 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
Jan Kiszka90260c62013-05-26 21:46:51 +0200376 }
Fam Zheng729633c2016-03-01 14:18:24 +0800377 if (update) {
378 atomic_set(&d->mru_section, section);
379 }
Jan Kiszka90260c62013-05-26 21:46:51 +0200380 return section;
Jan Kiszka9f029602013-05-06 16:48:02 +0200381}
382
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100383/* Called from RCU critical section */
Jan Kiszka90260c62013-05-26 21:46:51 +0200384static MemoryRegionSection *
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200385address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
Jan Kiszka90260c62013-05-26 21:46:51 +0200386 hwaddr *plen, bool resolve_subpage)
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200387{
388 MemoryRegionSection *section;
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200389 MemoryRegion *mr;
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100390 Int128 diff;
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200391
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200392 section = address_space_lookup_region(d, addr, resolve_subpage);
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200393 /* Compute offset within MemoryRegionSection */
394 addr -= section->offset_within_address_space;
395
396 /* Compute offset within MemoryRegion */
397 *xlat = addr + section->offset_within_region;
398
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200399 mr = section->mr;
Paolo Bonzinib242e0e2015-07-04 00:24:51 +0200400
401 /* MMIO registers can be expected to perform full-width accesses based only
402 * on their address, without considering adjacent registers that could
403 * decode to completely different MemoryRegions. When such registers
404 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
405 * regions overlap wildly. For this reason we cannot clamp the accesses
406 * here.
407 *
408 * If the length is small (as is the case for address_space_ldl/stl),
409 * everything works fine. If the incoming length is large, however,
410 * the caller really has to do the clamping through memory_access_size.
411 */
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200412 if (memory_region_is_ram(mr)) {
Paolo Bonzinie4a511f2015-06-17 10:36:54 +0200413 diff = int128_sub(section->size, int128_make64(addr));
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200414 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
415 }
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200416 return section;
417}
Jan Kiszka90260c62013-05-26 21:46:51 +0200418
Paolo Bonzini41063e12015-03-18 14:21:43 +0100419/* Called from RCU critical section */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +0200420MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
421 hwaddr *xlat, hwaddr *plen,
422 bool is_write)
Jan Kiszka90260c62013-05-26 21:46:51 +0200423{
Avi Kivity30951152012-10-30 13:47:46 +0200424 IOMMUTLBEntry iotlb;
425 MemoryRegionSection *section;
426 MemoryRegion *mr;
Avi Kivity30951152012-10-30 13:47:46 +0200427
428 for (;;) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100429 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
430 section = address_space_translate_internal(d, addr, &addr, plen, true);
Avi Kivity30951152012-10-30 13:47:46 +0200431 mr = section->mr;
432
433 if (!mr->iommu_ops) {
434 break;
435 }
436
Le Tan8d7b8cb2014-08-16 13:55:37 +0800437 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
Avi Kivity30951152012-10-30 13:47:46 +0200438 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
439 | (addr & iotlb.addr_mask));
Peter Crosthwaite23820db2015-03-16 22:35:54 -0700440 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
Avi Kivity30951152012-10-30 13:47:46 +0200441 if (!(iotlb.perm & (1 << is_write))) {
442 mr = &io_mem_unassigned;
443 break;
444 }
445
446 as = iotlb.target_as;
447 }
448
Alexey Kardashevskiyfe680d02014-05-07 13:40:39 +0000449 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100450 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
Peter Crosthwaite23820db2015-03-16 22:35:54 -0700451 *plen = MIN(page, *plen);
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100452 }
453
Avi Kivity30951152012-10-30 13:47:46 +0200454 *xlat = addr;
455 return mr;
Jan Kiszka90260c62013-05-26 21:46:51 +0200456}
457
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100458/* Called from RCU critical section */
Jan Kiszka90260c62013-05-26 21:46:51 +0200459MemoryRegionSection *
Peter Maydelld7898cd2016-01-21 14:15:05 +0000460address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +0200461 hwaddr *xlat, hwaddr *plen)
Jan Kiszka90260c62013-05-26 21:46:51 +0200462{
Avi Kivity30951152012-10-30 13:47:46 +0200463 MemoryRegionSection *section;
Peter Maydelld7898cd2016-01-21 14:15:05 +0000464 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
465
466 section = address_space_translate_internal(d, addr, xlat, plen, false);
Avi Kivity30951152012-10-30 13:47:46 +0200467
468 assert(!section->mr->iommu_ops);
469 return section;
Jan Kiszka90260c62013-05-26 21:46:51 +0200470}
bellard9fa3e852004-01-04 18:06:42 +0000471#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000472
Andreas Färberb170fce2013-01-20 20:23:22 +0100473#if !defined(CONFIG_USER_ONLY)
pbrook9656f322008-07-01 20:01:19 +0000474
Juan Quintelae59fb372009-09-29 22:48:21 +0200475static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200476{
Andreas Färber259186a2013-01-17 18:51:17 +0100477 CPUState *cpu = opaque;
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200478
aurel323098dba2009-03-07 21:28:24 +0000479 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
480 version_id is increased. */
Andreas Färber259186a2013-01-17 18:51:17 +0100481 cpu->interrupt_request &= ~0x01;
Christian Borntraegerc01a71c2014-03-17 17:13:12 +0100482 tlb_flush(cpu, 1);
pbrook9656f322008-07-01 20:01:19 +0000483
484 return 0;
485}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200486
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400487static int cpu_common_pre_load(void *opaque)
488{
489 CPUState *cpu = opaque;
490
Paolo Bonziniadee6422014-12-19 12:53:14 +0100491 cpu->exception_index = -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400492
493 return 0;
494}
495
496static bool cpu_common_exception_index_needed(void *opaque)
497{
498 CPUState *cpu = opaque;
499
Paolo Bonziniadee6422014-12-19 12:53:14 +0100500 return tcg_enabled() && cpu->exception_index != -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400501}
502
503static const VMStateDescription vmstate_cpu_common_exception_index = {
504 .name = "cpu_common/exception_index",
505 .version_id = 1,
506 .minimum_version_id = 1,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200507 .needed = cpu_common_exception_index_needed,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400508 .fields = (VMStateField[]) {
509 VMSTATE_INT32(exception_index, CPUState),
510 VMSTATE_END_OF_LIST()
511 }
512};
513
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300514static bool cpu_common_crash_occurred_needed(void *opaque)
515{
516 CPUState *cpu = opaque;
517
518 return cpu->crash_occurred;
519}
520
521static const VMStateDescription vmstate_cpu_common_crash_occurred = {
522 .name = "cpu_common/crash_occurred",
523 .version_id = 1,
524 .minimum_version_id = 1,
525 .needed = cpu_common_crash_occurred_needed,
526 .fields = (VMStateField[]) {
527 VMSTATE_BOOL(crash_occurred, CPUState),
528 VMSTATE_END_OF_LIST()
529 }
530};
531
Andreas Färber1a1562f2013-06-17 04:09:11 +0200532const VMStateDescription vmstate_cpu_common = {
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200533 .name = "cpu_common",
534 .version_id = 1,
535 .minimum_version_id = 1,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400536 .pre_load = cpu_common_pre_load,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200537 .post_load = cpu_common_post_load,
Juan Quintela35d08452014-04-16 16:01:33 +0200538 .fields = (VMStateField[]) {
Andreas Färber259186a2013-01-17 18:51:17 +0100539 VMSTATE_UINT32(halted, CPUState),
540 VMSTATE_UINT32(interrupt_request, CPUState),
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200541 VMSTATE_END_OF_LIST()
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400542 },
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200543 .subsections = (const VMStateDescription*[]) {
544 &vmstate_cpu_common_exception_index,
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300545 &vmstate_cpu_common_crash_occurred,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200546 NULL
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200547 }
548};
Andreas Färber1a1562f2013-06-17 04:09:11 +0200549
pbrook9656f322008-07-01 20:01:19 +0000550#endif
551
Andreas Färber38d8f5c2012-12-17 19:47:15 +0100552CPUState *qemu_get_cpu(int index)
Glauber Costa950f1472009-06-09 12:15:18 -0400553{
Andreas Färberbdc44642013-06-24 23:50:24 +0200554 CPUState *cpu;
Glauber Costa950f1472009-06-09 12:15:18 -0400555
Andreas Färberbdc44642013-06-24 23:50:24 +0200556 CPU_FOREACH(cpu) {
Andreas Färber55e5c282012-12-17 06:18:02 +0100557 if (cpu->cpu_index == index) {
Andreas Färberbdc44642013-06-24 23:50:24 +0200558 return cpu;
Andreas Färber55e5c282012-12-17 06:18:02 +0100559 }
Glauber Costa950f1472009-06-09 12:15:18 -0400560 }
561
Andreas Färberbdc44642013-06-24 23:50:24 +0200562 return NULL;
Glauber Costa950f1472009-06-09 12:15:18 -0400563}
564
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000565#if !defined(CONFIG_USER_ONLY)
Peter Maydell56943e82016-01-21 14:15:04 +0000566void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000567{
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000568 CPUAddressSpace *newas;
569
570 /* Target code should have set num_ases before calling us */
571 assert(asidx < cpu->num_ases);
572
Peter Maydell56943e82016-01-21 14:15:04 +0000573 if (asidx == 0) {
574 /* address space 0 gets the convenience alias */
575 cpu->as = as;
576 }
577
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000578 /* KVM cannot currently support multiple address spaces. */
579 assert(asidx == 0 || !kvm_enabled());
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000580
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000581 if (!cpu->cpu_ases) {
582 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000583 }
Peter Maydell32857f42015-10-01 15:29:50 +0100584
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000585 newas = &cpu->cpu_ases[asidx];
586 newas->cpu = cpu;
587 newas->as = as;
Peter Maydell56943e82016-01-21 14:15:04 +0000588 if (tcg_enabled()) {
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000589 newas->tcg_as_listener.commit = tcg_commit;
590 memory_listener_register(&newas->tcg_as_listener, as);
Peter Maydell56943e82016-01-21 14:15:04 +0000591 }
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000592}
Peter Maydell651a5bc2016-01-21 14:15:05 +0000593
594AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
595{
596 /* Return the AddressSpace corresponding to the specified index */
597 return cpu->cpu_ases[asidx].as;
598}
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000599#endif
600
Bharata B Raob7bca732015-06-23 19:31:13 -0700601#ifndef CONFIG_USER_ONLY
602static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
603
604static int cpu_get_free_index(Error **errp)
605{
606 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
607
608 if (cpu >= MAX_CPUMASK_BITS) {
609 error_setg(errp, "Trying to use more CPUs than max of %d",
610 MAX_CPUMASK_BITS);
611 return -1;
612 }
613
614 bitmap_set(cpu_index_map, cpu, 1);
615 return cpu;
616}
617
Bharata B Rao1c59eb32016-05-12 09:18:11 +0530618static void cpu_release_index(CPUState *cpu)
Bharata B Raob7bca732015-06-23 19:31:13 -0700619{
Bharata B Raob7bca732015-06-23 19:31:13 -0700620 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
Bharata B Raob7bca732015-06-23 19:31:13 -0700621}
622#else
623
624static int cpu_get_free_index(Error **errp)
625{
626 CPUState *some_cpu;
627 int cpu_index = 0;
628
629 CPU_FOREACH(some_cpu) {
630 cpu_index++;
631 }
632 return cpu_index;
633}
634
Bharata B Rao1c59eb32016-05-12 09:18:11 +0530635static void cpu_release_index(CPUState *cpu)
Bharata B Raob7bca732015-06-23 19:31:13 -0700636{
Bharata B Rao1c59eb32016-05-12 09:18:11 +0530637 return;
Bharata B Raob7bca732015-06-23 19:31:13 -0700638}
639#endif
640
Bharata B Rao1c59eb32016-05-12 09:18:11 +0530641void cpu_exec_exit(CPUState *cpu)
642{
Bharata B Rao9dfeca72016-05-12 09:18:12 +0530643 CPUClass *cc = CPU_GET_CLASS(cpu);
644
Bharata B Rao1c59eb32016-05-12 09:18:11 +0530645#if defined(CONFIG_USER_ONLY)
646 cpu_list_lock();
647#endif
648 if (cpu->cpu_index == -1) {
649 /* cpu_index was never allocated by this @cpu or was already freed. */
650#if defined(CONFIG_USER_ONLY)
651 cpu_list_unlock();
652#endif
653 return;
654 }
655
656 QTAILQ_REMOVE(&cpus, cpu, node);
657 cpu_release_index(cpu);
658 cpu->cpu_index = -1;
659#if defined(CONFIG_USER_ONLY)
660 cpu_list_unlock();
661#endif
Bharata B Rao9dfeca72016-05-12 09:18:12 +0530662
663 if (cc->vmsd != NULL) {
664 vmstate_unregister(NULL, cc->vmsd, cpu);
665 }
666 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
667 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
668 }
Bharata B Rao1c59eb32016-05-12 09:18:11 +0530669}
670
Peter Crosthwaite4bad9e32015-06-23 19:31:18 -0700671void cpu_exec_init(CPUState *cpu, Error **errp)
bellardfd6ce8f2003-05-14 19:00:11 +0000672{
Andreas Färberb170fce2013-01-20 20:23:22 +0100673 CPUClass *cc = CPU_GET_CLASS(cpu);
Bharata B Raob7bca732015-06-23 19:31:13 -0700674 Error *local_err = NULL;
bellard6a00d602005-11-21 23:25:50 +0000675
Peter Maydell56943e82016-01-21 14:15:04 +0000676 cpu->as = NULL;
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000677 cpu->num_ases = 0;
Peter Maydell56943e82016-01-21 14:15:04 +0000678
Eduardo Habkost291135b2015-04-27 17:00:33 -0300679#ifndef CONFIG_USER_ONLY
Eduardo Habkost291135b2015-04-27 17:00:33 -0300680 cpu->thread_id = qemu_get_thread_id();
Peter Crosthwaite6731d862016-01-21 14:15:06 +0000681
682 /* This is a softmmu CPU object, so create a property for it
683 * so users can wire up its memory. (This can't go in qom/cpu.c
684 * because that file is compiled only once for both user-mode
685 * and system builds.) The default if no link is set up is to use
686 * the system address space.
687 */
688 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
689 (Object **)&cpu->memory,
690 qdev_prop_allow_set_link_before_realize,
691 OBJ_PROP_LINK_UNREF_ON_RELEASE,
692 &error_abort);
693 cpu->memory = system_memory;
694 object_ref(OBJECT(cpu->memory));
Eduardo Habkost291135b2015-04-27 17:00:33 -0300695#endif
696
pbrookc2764712009-03-07 15:24:59 +0000697#if defined(CONFIG_USER_ONLY)
698 cpu_list_lock();
699#endif
Paolo Bonzini741da0d2014-06-27 08:40:04 +0200700 cpu->cpu_index = cpu_get_free_index(&local_err);
Bharata B Raob7bca732015-06-23 19:31:13 -0700701 if (local_err) {
702 error_propagate(errp, local_err);
703#if defined(CONFIG_USER_ONLY)
704 cpu_list_unlock();
705#endif
706 return;
bellard6a00d602005-11-21 23:25:50 +0000707 }
Andreas Färberbdc44642013-06-24 23:50:24 +0200708 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
pbrookc2764712009-03-07 15:24:59 +0000709#if defined(CONFIG_USER_ONLY)
Paolo Bonzini741da0d2014-06-27 08:40:04 +0200710 (void) cc;
pbrookc2764712009-03-07 15:24:59 +0000711 cpu_list_unlock();
Paolo Bonzini741da0d2014-06-27 08:40:04 +0200712#else
Andreas Färbere0d47942013-07-29 04:07:50 +0200713 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
Paolo Bonzini741da0d2014-06-27 08:40:04 +0200714 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
Andreas Färbere0d47942013-07-29 04:07:50 +0200715 }
Andreas Färberb170fce2013-01-20 20:23:22 +0100716 if (cc->vmsd != NULL) {
Paolo Bonzini741da0d2014-06-27 08:40:04 +0200717 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
Andreas Färberb170fce2013-01-20 20:23:22 +0100718 }
Paolo Bonzini741da0d2014-06-27 08:40:04 +0200719#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000720}
721
Paul Brook94df27f2010-02-28 23:47:45 +0000722#if defined(CONFIG_USER_ONLY)
Andreas Färber00b941e2013-06-29 18:55:54 +0200723static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +0000724{
725 tb_invalidate_phys_page_range(pc, pc + 1, 0);
726}
727#else
Andreas Färber00b941e2013-06-29 18:55:54 +0200728static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
Max Filippov1e7855a2012-04-10 02:48:17 +0400729{
Peter Maydell5232e4c2016-01-21 14:15:06 +0000730 MemTxAttrs attrs;
731 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
732 int asidx = cpu_asidx_from_attrs(cpu, attrs);
Max Filippove8262a12013-09-27 22:29:17 +0400733 if (phys != -1) {
Peter Maydell5232e4c2016-01-21 14:15:06 +0000734 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +0100735 phys | (pc & ~TARGET_PAGE_MASK));
Max Filippove8262a12013-09-27 22:29:17 +0400736 }
Max Filippov1e7855a2012-04-10 02:48:17 +0400737}
bellardc27004e2005-01-03 23:35:10 +0000738#endif
bellardd720b932004-04-25 17:57:43 +0000739
Paul Brookc527ee82010-03-01 03:31:14 +0000740#if defined(CONFIG_USER_ONLY)
Andreas Färber75a34032013-09-02 16:57:02 +0200741void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +0000742
743{
744}
745
Peter Maydell3ee887e2014-09-12 14:06:48 +0100746int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
747 int flags)
748{
749 return -ENOSYS;
750}
751
752void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
753{
754}
755
Andreas Färber75a34032013-09-02 16:57:02 +0200756int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
Paul Brookc527ee82010-03-01 03:31:14 +0000757 int flags, CPUWatchpoint **watchpoint)
758{
759 return -ENOSYS;
760}
761#else
pbrook6658ffb2007-03-16 23:58:11 +0000762/* Add a watchpoint. */
Andreas Färber75a34032013-09-02 16:57:02 +0200763int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
aliguoria1d1bb32008-11-18 20:07:32 +0000764 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +0000765{
aliguoric0ce9982008-11-25 22:13:57 +0000766 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +0000767
Peter Maydell05068c02014-09-12 14:06:48 +0100768 /* forbid ranges which are empty or run off the end of the address space */
Max Filippov07e28632014-09-17 22:03:36 -0700769 if (len == 0 || (addr + len - 1) < addr) {
Andreas Färber75a34032013-09-02 16:57:02 +0200770 error_report("tried to set invalid watchpoint at %"
771 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
aliguorib4051332008-11-18 20:14:20 +0000772 return -EINVAL;
773 }
Anthony Liguori7267c092011-08-20 22:09:37 -0500774 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +0000775
aliguoria1d1bb32008-11-18 20:07:32 +0000776 wp->vaddr = addr;
Peter Maydell05068c02014-09-12 14:06:48 +0100777 wp->len = len;
aliguoria1d1bb32008-11-18 20:07:32 +0000778 wp->flags = flags;
779
aliguori2dc9f412008-11-18 20:56:59 +0000780 /* keep all GDB-injected watchpoints in front */
Andreas Färberff4700b2013-08-26 18:23:18 +0200781 if (flags & BP_GDB) {
782 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
783 } else {
784 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
785 }
aliguoria1d1bb32008-11-18 20:07:32 +0000786
Andreas Färber31b030d2013-09-04 01:29:02 +0200787 tlb_flush_page(cpu, addr);
aliguoria1d1bb32008-11-18 20:07:32 +0000788
789 if (watchpoint)
790 *watchpoint = wp;
791 return 0;
pbrook6658ffb2007-03-16 23:58:11 +0000792}
793
aliguoria1d1bb32008-11-18 20:07:32 +0000794/* Remove a specific watchpoint. */
Andreas Färber75a34032013-09-02 16:57:02 +0200795int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
aliguoria1d1bb32008-11-18 20:07:32 +0000796 int flags)
pbrook6658ffb2007-03-16 23:58:11 +0000797{
aliguoria1d1bb32008-11-18 20:07:32 +0000798 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +0000799
Andreas Färberff4700b2013-08-26 18:23:18 +0200800 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +0100801 if (addr == wp->vaddr && len == wp->len
aliguori6e140f22008-11-18 20:37:55 +0000802 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
Andreas Färber75a34032013-09-02 16:57:02 +0200803 cpu_watchpoint_remove_by_ref(cpu, wp);
pbrook6658ffb2007-03-16 23:58:11 +0000804 return 0;
805 }
806 }
aliguoria1d1bb32008-11-18 20:07:32 +0000807 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +0000808}
809
aliguoria1d1bb32008-11-18 20:07:32 +0000810/* Remove a specific watchpoint by reference. */
Andreas Färber75a34032013-09-02 16:57:02 +0200811void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +0000812{
Andreas Färberff4700b2013-08-26 18:23:18 +0200813 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +0000814
Andreas Färber31b030d2013-09-04 01:29:02 +0200815 tlb_flush_page(cpu, watchpoint->vaddr);
aliguoria1d1bb32008-11-18 20:07:32 +0000816
Anthony Liguori7267c092011-08-20 22:09:37 -0500817 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +0000818}
819
aliguoria1d1bb32008-11-18 20:07:32 +0000820/* Remove all matching watchpoints. */
Andreas Färber75a34032013-09-02 16:57:02 +0200821void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000822{
aliguoric0ce9982008-11-25 22:13:57 +0000823 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000824
Andreas Färberff4700b2013-08-26 18:23:18 +0200825 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
Andreas Färber75a34032013-09-02 16:57:02 +0200826 if (wp->flags & mask) {
827 cpu_watchpoint_remove_by_ref(cpu, wp);
828 }
aliguoric0ce9982008-11-25 22:13:57 +0000829 }
aliguoria1d1bb32008-11-18 20:07:32 +0000830}
Peter Maydell05068c02014-09-12 14:06:48 +0100831
832/* Return true if this watchpoint address matches the specified
833 * access (ie the address range covered by the watchpoint overlaps
834 * partially or completely with the address range covered by the
835 * access).
836 */
837static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
838 vaddr addr,
839 vaddr len)
840{
841 /* We know the lengths are non-zero, but a little caution is
842 * required to avoid errors in the case where the range ends
843 * exactly at the top of the address space and so addr + len
844 * wraps round to zero.
845 */
846 vaddr wpend = wp->vaddr + wp->len - 1;
847 vaddr addrend = addr + len - 1;
848
849 return !(addr > wpend || wp->vaddr > addrend);
850}
851
Paul Brookc527ee82010-03-01 03:31:14 +0000852#endif
aliguoria1d1bb32008-11-18 20:07:32 +0000853
854/* Add a breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200855int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +0000856 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000857{
aliguoric0ce9982008-11-25 22:13:57 +0000858 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +0000859
Anthony Liguori7267c092011-08-20 22:09:37 -0500860 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +0000861
862 bp->pc = pc;
863 bp->flags = flags;
864
aliguori2dc9f412008-11-18 20:56:59 +0000865 /* keep all GDB-injected breakpoints in front */
Andreas Färber00b941e2013-06-29 18:55:54 +0200866 if (flags & BP_GDB) {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200867 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200868 } else {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200869 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200870 }
aliguoria1d1bb32008-11-18 20:07:32 +0000871
Andreas Färberf0c3c502013-08-26 21:22:53 +0200872 breakpoint_invalidate(cpu, pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000873
Andreas Färber00b941e2013-06-29 18:55:54 +0200874 if (breakpoint) {
aliguoria1d1bb32008-11-18 20:07:32 +0000875 *breakpoint = bp;
Andreas Färber00b941e2013-06-29 18:55:54 +0200876 }
aliguoria1d1bb32008-11-18 20:07:32 +0000877 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000878}
879
880/* Remove a specific breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200881int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +0000882{
aliguoria1d1bb32008-11-18 20:07:32 +0000883 CPUBreakpoint *bp;
884
Andreas Färberf0c3c502013-08-26 21:22:53 +0200885 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +0000886 if (bp->pc == pc && bp->flags == flags) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200887 cpu_breakpoint_remove_by_ref(cpu, bp);
bellard4c3a88a2003-07-26 12:06:08 +0000888 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000889 }
bellard4c3a88a2003-07-26 12:06:08 +0000890 }
aliguoria1d1bb32008-11-18 20:07:32 +0000891 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +0000892}
893
aliguoria1d1bb32008-11-18 20:07:32 +0000894/* Remove a specific breakpoint by reference. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200895void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000896{
Andreas Färberf0c3c502013-08-26 21:22:53 +0200897 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
898
899 breakpoint_invalidate(cpu, breakpoint->pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000900
Anthony Liguori7267c092011-08-20 22:09:37 -0500901 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +0000902}
903
904/* Remove all matching breakpoints. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200905void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000906{
aliguoric0ce9982008-11-25 22:13:57 +0000907 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000908
Andreas Färberf0c3c502013-08-26 21:22:53 +0200909 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200910 if (bp->flags & mask) {
911 cpu_breakpoint_remove_by_ref(cpu, bp);
912 }
aliguoric0ce9982008-11-25 22:13:57 +0000913 }
bellard4c3a88a2003-07-26 12:06:08 +0000914}
915
bellardc33a3462003-07-29 20:50:33 +0000916/* enable or disable single step mode. EXCP_DEBUG is returned by the
917 CPU loop after each instruction */
Andreas Färber3825b282013-06-24 18:41:06 +0200918void cpu_single_step(CPUState *cpu, int enabled)
bellardc33a3462003-07-29 20:50:33 +0000919{
Andreas Färbered2803d2013-06-21 20:20:45 +0200920 if (cpu->singlestep_enabled != enabled) {
921 cpu->singlestep_enabled = enabled;
922 if (kvm_enabled()) {
Stefan Weil38e478e2013-07-25 20:50:21 +0200923 kvm_update_guest_debug(cpu, 0);
Andreas Färbered2803d2013-06-21 20:20:45 +0200924 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100925 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +0000926 /* XXX: only flush what is necessary */
Peter Crosthwaitebbd77c12015-06-23 19:31:15 -0700927 tb_flush(cpu);
aliguorie22a25c2009-03-12 20:12:48 +0000928 }
bellardc33a3462003-07-29 20:50:33 +0000929 }
bellardc33a3462003-07-29 20:50:33 +0000930}
931
Andreas Färbera47dddd2013-09-03 17:38:47 +0200932void cpu_abort(CPUState *cpu, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +0000933{
934 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +0000935 va_list ap2;
bellard75012672003-06-21 13:11:07 +0000936
937 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +0000938 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +0000939 fprintf(stderr, "qemu: fatal: ");
940 vfprintf(stderr, fmt, ap);
941 fprintf(stderr, "\n");
Andreas Färber878096e2013-05-27 01:33:50 +0200942 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
Paolo Bonzini013a2942015-11-13 13:16:27 +0100943 if (qemu_log_separate()) {
aliguori93fcfe32009-01-15 22:34:14 +0000944 qemu_log("qemu: fatal: ");
945 qemu_log_vprintf(fmt, ap2);
946 qemu_log("\n");
Andreas Färbera0762852013-06-16 07:28:50 +0200947 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +0000948 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +0000949 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +0000950 }
pbrook493ae1f2007-11-23 16:53:59 +0000951 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +0000952 va_end(ap);
Pavel Dovgalyuk76159362015-09-17 19:25:07 +0300953 replay_finish();
Riku Voipiofd052bf2010-01-25 14:30:49 +0200954#if defined(CONFIG_USER_ONLY)
955 {
956 struct sigaction act;
957 sigfillset(&act.sa_mask);
958 act.sa_handler = SIG_DFL;
959 sigaction(SIGABRT, &act, NULL);
960 }
961#endif
bellard75012672003-06-21 13:11:07 +0000962 abort();
963}
964
bellard01243112004-01-04 15:48:17 +0000965#if !defined(CONFIG_USER_ONLY)
Mike Day0dc3f442013-09-05 14:41:35 -0400966/* Called from RCU critical section */
Paolo Bonzini041603f2013-09-09 17:49:45 +0200967static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
968{
969 RAMBlock *block;
970
Paolo Bonzini43771532013-09-09 17:58:40 +0200971 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200972 if (block && addr - block->offset < block->max_length) {
Paolo Bonzini68851b92015-10-22 13:51:30 +0200973 return block;
Paolo Bonzini041603f2013-09-09 17:49:45 +0200974 }
Mike Day0dc3f442013-09-05 14:41:35 -0400975 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200976 if (addr - block->offset < block->max_length) {
Paolo Bonzini041603f2013-09-09 17:49:45 +0200977 goto found;
978 }
979 }
980
981 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
982 abort();
983
984found:
Paolo Bonzini43771532013-09-09 17:58:40 +0200985 /* It is safe to write mru_block outside the iothread lock. This
986 * is what happens:
987 *
988 * mru_block = xxx
989 * rcu_read_unlock()
990 * xxx removed from list
991 * rcu_read_lock()
992 * read mru_block
993 * mru_block = NULL;
994 * call_rcu(reclaim_ramblock, xxx);
995 * rcu_read_unlock()
996 *
997 * atomic_rcu_set is not needed here. The block was already published
998 * when it was placed into the list. Here we're just making an extra
999 * copy of the pointer.
1000 */
Paolo Bonzini041603f2013-09-09 17:49:45 +02001001 ram_list.mru_block = block;
1002 return block;
1003}
1004
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +02001005static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001006{
Peter Crosthwaite9a135652015-09-10 22:39:41 -07001007 CPUState *cpu;
Paolo Bonzini041603f2013-09-09 17:49:45 +02001008 ram_addr_t start1;
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +02001009 RAMBlock *block;
1010 ram_addr_t end;
1011
1012 end = TARGET_PAGE_ALIGN(start + length);
1013 start &= TARGET_PAGE_MASK;
bellardf23db162005-08-21 19:12:28 +00001014
Mike Day0dc3f442013-09-05 14:41:35 -04001015 rcu_read_lock();
Paolo Bonzini041603f2013-09-09 17:49:45 +02001016 block = qemu_get_ram_block(start);
1017 assert(block == qemu_get_ram_block(end - 1));
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001018 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
Peter Crosthwaite9a135652015-09-10 22:39:41 -07001019 CPU_FOREACH(cpu) {
1020 tlb_reset_dirty(cpu, start1, length);
1021 }
Mike Day0dc3f442013-09-05 14:41:35 -04001022 rcu_read_unlock();
Juan Quintelad24981d2012-05-22 00:42:40 +02001023}
1024
1025/* Note: start and end must be within the same ram block. */
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +00001026bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1027 ram_addr_t length,
1028 unsigned client)
Juan Quintelad24981d2012-05-22 00:42:40 +02001029{
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001030 DirtyMemoryBlocks *blocks;
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +00001031 unsigned long end, page;
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001032 bool dirty = false;
Juan Quintelad24981d2012-05-22 00:42:40 +02001033
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +00001034 if (length == 0) {
1035 return false;
1036 }
1037
1038 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1039 page = start >> TARGET_PAGE_BITS;
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001040
1041 rcu_read_lock();
1042
1043 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1044
1045 while (page < end) {
1046 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1047 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1048 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1049
1050 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1051 offset, num);
1052 page += num;
1053 }
1054
1055 rcu_read_unlock();
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +00001056
1057 if (dirty && tcg_enabled()) {
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +02001058 tlb_reset_dirty_range_all(start, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001059 }
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +00001060
1061 return dirty;
bellard1ccde1c2004-02-06 19:46:14 +00001062}
1063
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01001064/* Called from RCU critical section */
Andreas Färberbb0e6272013-09-03 13:32:01 +02001065hwaddr memory_region_section_get_iotlb(CPUState *cpu,
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001066 MemoryRegionSection *section,
1067 target_ulong vaddr,
1068 hwaddr paddr, hwaddr xlat,
1069 int prot,
1070 target_ulong *address)
Blue Swirle5548612012-04-21 13:08:33 +00001071{
Avi Kivitya8170e52012-10-23 12:30:10 +02001072 hwaddr iotlb;
Blue Swirle5548612012-04-21 13:08:33 +00001073 CPUWatchpoint *wp;
1074
Blue Swirlcc5bea62012-04-14 14:56:48 +00001075 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001076 /* Normal RAM. */
Paolo Bonzinie4e69792016-03-01 10:44:50 +01001077 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
Blue Swirle5548612012-04-21 13:08:33 +00001078 if (!section->readonly) {
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001079 iotlb |= PHYS_SECTION_NOTDIRTY;
Blue Swirle5548612012-04-21 13:08:33 +00001080 } else {
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001081 iotlb |= PHYS_SECTION_ROM;
Blue Swirle5548612012-04-21 13:08:33 +00001082 }
1083 } else {
Peter Maydell0b8e2c12015-07-20 12:27:16 +01001084 AddressSpaceDispatch *d;
1085
1086 d = atomic_rcu_read(&section->address_space->dispatch);
1087 iotlb = section - d->map.sections;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001088 iotlb += xlat;
Blue Swirle5548612012-04-21 13:08:33 +00001089 }
1090
1091 /* Make accesses to pages with watchpoints go via the
1092 watchpoint trap routines. */
Andreas Färberff4700b2013-08-26 18:23:18 +02001093 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001094 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
Blue Swirle5548612012-04-21 13:08:33 +00001095 /* Avoid trapping reads of pages with a write breakpoint. */
1096 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001097 iotlb = PHYS_SECTION_WATCH + paddr;
Blue Swirle5548612012-04-21 13:08:33 +00001098 *address |= TLB_MMIO;
1099 break;
1100 }
1101 }
1102 }
1103
1104 return iotlb;
1105}
bellard9fa3e852004-01-04 18:06:42 +00001106#endif /* defined(CONFIG_USER_ONLY) */
1107
pbrooke2eef172008-06-08 01:09:01 +00001108#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00001109
Anthony Liguoric227f092009-10-01 16:12:16 -05001110static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001111 uint16_t section);
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001112static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
Avi Kivity54688b12012-02-09 17:34:32 +02001113
Igor Mammedova2b257d2014-10-31 16:38:37 +00001114static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1115 qemu_anon_ram_alloc;
Markus Armbruster91138032013-07-31 15:11:08 +02001116
1117/*
1118 * Set a custom physical guest memory alloator.
1119 * Accelerators with unusual needs may need this. Hopefully, we can
1120 * get rid of it eventually.
1121 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001122void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001123{
1124 phys_mem_alloc = alloc;
1125}
1126
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001127static uint16_t phys_section_add(PhysPageMap *map,
1128 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001129{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001130 /* The physical section number is ORed with a page-aligned
1131 * pointer to produce the iotlb entries. Thus it should
1132 * never overflow into the page-aligned value.
1133 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001134 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001135
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001136 if (map->sections_nb == map->sections_nb_alloc) {
1137 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1138 map->sections = g_renew(MemoryRegionSection, map->sections,
1139 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001140 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001141 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001142 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001143 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001144}
1145
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001146static void phys_section_destroy(MemoryRegion *mr)
1147{
Don Slutz55b4e802015-11-30 17:11:04 -05001148 bool have_sub_page = mr->subpage;
1149
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001150 memory_region_unref(mr);
1151
Don Slutz55b4e802015-11-30 17:11:04 -05001152 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001153 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001154 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001155 g_free(subpage);
1156 }
1157}
1158
Paolo Bonzini60926662013-05-29 12:30:26 +02001159static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001160{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001161 while (map->sections_nb > 0) {
1162 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001163 phys_section_destroy(section->mr);
1164 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001165 g_free(map->sections);
1166 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001167}
1168
Avi Kivityac1970f2012-10-03 16:22:53 +02001169static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001170{
1171 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001172 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001173 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001174 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001175 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001176 MemoryRegionSection subsection = {
1177 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001178 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001179 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001180 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001181
Avi Kivityf3705d52012-03-08 16:16:34 +02001182 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001183
Avi Kivityf3705d52012-03-08 16:16:34 +02001184 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001185 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001186 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001187 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001188 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001189 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001190 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001191 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001192 }
1193 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001194 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001195 subpage_register(subpage, start, end,
1196 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001197}
1198
1199
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001200static void register_multipage(AddressSpaceDispatch *d,
1201 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001202{
Avi Kivitya8170e52012-10-23 12:30:10 +02001203 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001204 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1206 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001207
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001208 assert(num_pages);
1209 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001210}
1211
Avi Kivityac1970f2012-10-03 16:22:53 +02001212static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001213{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001214 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001215 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001216 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001217 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001218
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001219 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1220 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1221 - now.offset_within_address_space;
1222
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001223 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001224 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001225 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001226 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001227 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001228 while (int128_ne(remain.size, now.size)) {
1229 remain.size = int128_sub(remain.size, now.size);
1230 remain.offset_within_address_space += int128_get64(now.size);
1231 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001232 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001233 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001234 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001235 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001236 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001237 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001238 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001239 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001240 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001241 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001242 }
1243}
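/* Worked example (assuming 4 KiB target pages): a section covering
 * [0x800, 0x2800) is registered by mem_add() in three steps: a head
 * subpage for [0x800, 0x1000), the full page [0x1000, 0x2000) via
 * register_multipage(), and a tail subpage for [0x2000, 0x2800).
 */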
1244
Sheng Yang62a27442010-01-26 19:21:16 +08001245void qemu_flush_coalesced_mmio_buffer(void)
1246{
1247 if (kvm_enabled())
1248 kvm_flush_coalesced_mmio_buffer();
1249}
1250
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001251void qemu_mutex_lock_ramlist(void)
1252{
1253 qemu_mutex_lock(&ram_list.mutex);
1254}
1255
1256void qemu_mutex_unlock_ramlist(void)
1257{
1258 qemu_mutex_unlock(&ram_list.mutex);
1259}
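/* Illustrative usage (a sketch): writers take the list mutex around any
 * modification of ram_list and bump ram_list.version, while readers may
 * rely on RCU instead:
 *
 *     qemu_mutex_lock_ramlist();
 *     ...insert or remove a RAMBlock, bump ram_list.version...
 *     qemu_mutex_unlock_ramlist();
 */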
1260
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001261#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001262static void *file_ram_alloc(RAMBlock *block,
1263 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001264 const char *path,
1265 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001266{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001267 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001268 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001269 char *sanitized_name;
1270 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001271 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001272 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001273 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001275 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1276 error_setg(errp,
1277 "host lacks kvm mmu notifiers, -mem-path unsupported");
1278 return NULL;
1279 }
1280
1281 for (;;) {
1282 fd = open(path, O_RDWR);
1283 if (fd >= 0) {
1284 /* @path names an existing file, use it */
1285 break;
1286 }
1287 if (errno == ENOENT) {
1288 /* @path names a file that doesn't exist, create it */
1289 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1290 if (fd >= 0) {
1291 unlink_on_error = true;
1292 break;
1293 }
1294 } else if (errno == EISDIR) {
1295 /* @path names a directory, create a file there */
1296 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1297 sanitized_name = g_strdup(memory_region_name(block->mr));
1298 for (c = sanitized_name; *c != '\0'; c++) {
1299 if (*c == '/') {
1300 *c = '_';
1301 }
1302 }
1303
1304 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1305 sanitized_name);
1306 g_free(sanitized_name);
1307
1308 fd = mkstemp(filename);
1309 if (fd >= 0) {
1310 unlink(filename);
1311 g_free(filename);
1312 break;
1313 }
1314 g_free(filename);
1315 }
1316 if (errno != EEXIST && errno != EINTR) {
1317 error_setg_errno(errp, errno,
1318 "can't open backing store %s for guest RAM",
1319 path);
1320 goto error;
1321 }
1322 /*
1323 * Try again on EINTR and EEXIST. The latter happens when
1324 * something else creates the file between our two open().
1325 */
1326 }
1327
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001328 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001329 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001330
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001331 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001332 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001333 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001334 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001335 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001336 }
1337
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001338 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001339
1340 /*
1341 * ftruncate is not supported by hugetlbfs in older
1342 * hosts, so don't bother bailing out on errors.
1343 * If anything goes wrong with it under other filesystems,
1344 * mmap will fail.
1345 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001346 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001347 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001348 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001349
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001350 area = qemu_ram_mmap(fd, memory, block->mr->align,
1351 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001352 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001353 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001354 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001355 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001356 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001357
1358 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001359 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001360 }
1361
Alex Williamson04b16652010-07-02 11:13:17 -06001362 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001363 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001364
1365error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001366 if (unlink_on_error) {
1367 unlink(path);
1368 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001369 if (fd != -1) {
1370 close(fd);
1371 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001372 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001373}
1374#endif
1375
Mike Day0dc3f442013-09-05 14:41:35 -04001376/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001377static ram_addr_t find_ram_offset(ram_addr_t size)
1378{
Alex Williamson04b16652010-07-02 11:13:17 -06001379 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001380 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001381
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001382 assert(size != 0); /* it would hand out same offset multiple times */
1383
Mike Day0dc3f442013-09-05 14:41:35 -04001384 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001385 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001386 }
Alex Williamson04b16652010-07-02 11:13:17 -06001387
Mike Day0dc3f442013-09-05 14:41:35 -04001388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001389 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001390
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001391 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001392
Mike Day0dc3f442013-09-05 14:41:35 -04001393 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001394 if (next_block->offset >= end) {
1395 next = MIN(next, next_block->offset);
1396 }
1397 }
1398 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001399 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001400 mingap = next - end;
1401 }
1402 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001403
1404 if (offset == RAM_ADDR_MAX) {
1405 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1406 (uint64_t)size);
1407 abort();
1408 }
1409
Alex Williamson04b16652010-07-02 11:13:17 -06001410 return offset;
1411}
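/* Worked example: with blocks at [0, 0x1000) and [0x3000, 0x4000), a
 * request for 0x1000 bytes considers the gap [0x1000, 0x3000) and the
 * unbounded space after 0x4000; the smallest gap that still fits wins,
 * so the new block is placed at offset 0x1000.
 */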
1412
Juan Quintela652d7ec2012-07-20 10:37:54 +02001413ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001414{
Alex Williamsond17b5282010-06-25 11:08:38 -06001415 RAMBlock *block;
1416 ram_addr_t last = 0;
1417
Mike Day0dc3f442013-09-05 14:41:35 -04001418 rcu_read_lock();
1419 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001420 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001421 }
Mike Day0dc3f442013-09-05 14:41:35 -04001422 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001423 return last;
1424}
1425
Jason Baronddb97f12012-08-02 15:44:16 -04001426static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1427{
1428 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001429
 1430 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001431 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001432 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1433 if (ret) {
1434 perror("qemu_madvise");
1435 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1436 "but dump_guest_core=off specified\n");
1437 }
1438 }
1439}
1440
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001441const char *qemu_ram_get_idstr(RAMBlock *rb)
1442{
1443 return rb->idstr;
1444}
1445
Mike Dayae3a7042013-09-05 14:41:35 -04001446/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001447void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001448{
Gongleifa53a0e2016-05-10 10:04:59 +08001449 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001450
Avi Kivityc5705a72011-12-20 15:59:12 +02001451 assert(new_block);
1452 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001453
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001454 if (dev) {
1455 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001456 if (id) {
1457 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001458 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001459 }
1460 }
1461 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1462
Gongleiab0a9952016-05-10 10:05:00 +08001463 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001464 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001465 if (block != new_block &&
1466 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001467 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1468 new_block->idstr);
1469 abort();
1470 }
1471 }
Mike Day0dc3f442013-09-05 14:41:35 -04001472 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001473}
1474
Mike Dayae3a7042013-09-05 14:41:35 -04001475/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001476void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001477{
Mike Dayae3a7042013-09-05 14:41:35 -04001478 /* FIXME: arch_init.c assumes that this is not called throughout
1479 * migration. Ignore the problem since hot-unplug during migration
1480 * does not work anyway.
1481 */
Hu Tao20cfe882014-04-02 15:13:26 +08001482 if (block) {
1483 memset(block->idstr, 0, sizeof(block->idstr));
1484 }
1485}
1486
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001487static int memory_try_enable_merging(void *addr, size_t len)
1488{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001489 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001490 /* disabled by the user */
1491 return 0;
1492 }
1493
1494 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1495}
1496
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001497/* Only legal before guest might have detected the memory size: e.g. on
1498 * incoming migration, or right after reset.
1499 *
1500 * As memory core doesn't know how is memory accessed, it is up to
1501 * resize callback to update device state and/or add assertions to detect
1502 * misuse, if necessary.
1503 */
Gongleifa53a0e2016-05-10 10:04:59 +08001504int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001505{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001506 assert(block);
1507
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001508 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001509
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001510 if (block->used_length == newsize) {
1511 return 0;
1512 }
1513
1514 if (!(block->flags & RAM_RESIZEABLE)) {
1515 error_setg_errno(errp, EINVAL,
1516 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1517 " in != 0x" RAM_ADDR_FMT, block->idstr,
1518 newsize, block->used_length);
1519 return -EINVAL;
1520 }
1521
1522 if (block->max_length < newsize) {
1523 error_setg_errno(errp, EINVAL,
1524 "Length too large: %s: 0x" RAM_ADDR_FMT
1525 " > 0x" RAM_ADDR_FMT, block->idstr,
1526 newsize, block->max_length);
1527 return -EINVAL;
1528 }
1529
1530 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1531 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001532 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1533 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001534 memory_region_set_size(block->mr, newsize);
1535 if (block->resized) {
1536 block->resized(block->idstr, newsize, block->host);
1537 }
1538 return 0;
1539}
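/* Illustrative usage (a sketch; resized_cb is a hypothetical callback,
 * not defined here): a device that may grow its RAM allocates a
 * resizeable block and resizes it later, e.g. on incoming migration:
 *
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(size, max_size,
 *                                              resized_cb, mr, &err);
 *     ...
 *     qemu_ram_resize(rb, new_used_length, &err);
 */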
1540
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001541/* Called with ram_list.mutex held */
1542static void dirty_memory_extend(ram_addr_t old_ram_size,
1543 ram_addr_t new_ram_size)
1544{
1545 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1546 DIRTY_MEMORY_BLOCK_SIZE);
1547 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1548 DIRTY_MEMORY_BLOCK_SIZE);
1549 int i;
1550
1551 /* Only need to extend if block count increased */
1552 if (new_num_blocks <= old_num_blocks) {
1553 return;
1554 }
1555
1556 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1557 DirtyMemoryBlocks *old_blocks;
1558 DirtyMemoryBlocks *new_blocks;
1559 int j;
1560
1561 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1562 new_blocks = g_malloc(sizeof(*new_blocks) +
1563 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1564
1565 if (old_num_blocks) {
1566 memcpy(new_blocks->blocks, old_blocks->blocks,
1567 old_num_blocks * sizeof(old_blocks->blocks[0]));
1568 }
1569
1570 for (j = old_num_blocks; j < new_num_blocks; j++) {
1571 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1572 }
1573
1574 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1575
1576 if (old_blocks) {
1577 g_free_rcu(old_blocks, rcu);
1578 }
1579 }
1580}
1581
Fam Zheng528f46a2016-03-01 14:18:18 +08001582static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001583{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001584 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001585 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001586 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001587 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001588
1589 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001590
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001591 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001592 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001593
1594 if (!new_block->host) {
1595 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001596 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001597 new_block->mr, &err);
1598 if (err) {
1599 error_propagate(errp, err);
1600 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001601 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001602 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001603 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001604 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001605 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001606 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001607 error_setg_errno(errp, errno,
1608 "cannot set up guest memory '%s'",
1609 memory_region_name(new_block->mr));
1610 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001611 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001612 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001613 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001614 }
1615 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001616
Li Zhijiandd631692015-07-02 20:18:06 +08001617 new_ram_size = MAX(old_ram_size,
1618 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1619 if (new_ram_size > old_ram_size) {
1620 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001621 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001622 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001623 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1624 * QLIST (which has an RCU-friendly variant) does not have insertion at
1625 * tail, so save the last element in last_block.
1626 */
Mike Day0dc3f442013-09-05 14:41:35 -04001627 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001628 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001629 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001630 break;
1631 }
1632 }
1633 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001634 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001635 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001636 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001637 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001638 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001639 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001640 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001641
Mike Day0dc3f442013-09-05 14:41:35 -04001642 /* Write list before version */
1643 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001644 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001645 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001646
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001647 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001648 new_block->used_length,
1649 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001650
Paolo Bonzinia904c912015-01-21 16:18:35 +01001651 if (new_block->host) {
1652 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1653 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1654 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1655 if (kvm_enabled()) {
1656 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1657 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001658 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001659}
1660
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001661#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001662RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1663 bool share, const char *mem_path,
1664 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001665{
1666 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001667 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001668
1669 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001670 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001671 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001672 }
1673
1674 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1675 /*
1676 * file_ram_alloc() needs to allocate just like
1677 * phys_mem_alloc, but we haven't bothered to provide
1678 * a hook there.
1679 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001680 error_setg(errp,
1681 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001682 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001683 }
1684
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001685 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001686 new_block = g_malloc0(sizeof(*new_block));
1687 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001688 new_block->used_length = size;
1689 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001690 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001691 new_block->host = file_ram_alloc(new_block, size,
1692 mem_path, errp);
1693 if (!new_block->host) {
1694 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001695 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001696 }
1697
Fam Zheng528f46a2016-03-01 14:18:18 +08001698 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001699 if (local_err) {
1700 g_free(new_block);
1701 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001702 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001703 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001704 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001705}
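/* Illustrative usage (a sketch; the path and size are hypothetical):
 *
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, true,
 *                                             "/dev/hugepages", &err);
 *
 * On failure rb is NULL and err describes the problem.
 */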
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001706#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001707
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001708static
Fam Zheng528f46a2016-03-01 14:18:18 +08001709RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1710 void (*resized)(const char*,
1711 uint64_t length,
1712 void *host),
1713 void *host, bool resizeable,
1714 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001715{
1716 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001717 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001718
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001719 size = HOST_PAGE_ALIGN(size);
1720 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001721 new_block = g_malloc0(sizeof(*new_block));
1722 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001723 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001724 new_block->used_length = size;
1725 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001726 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001727 new_block->fd = -1;
1728 new_block->host = host;
1729 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001730 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001731 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001732 if (resizeable) {
1733 new_block->flags |= RAM_RESIZEABLE;
1734 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001735 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001736 if (local_err) {
1737 g_free(new_block);
1738 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001739 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001740 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001741 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001742}
1743
Fam Zheng528f46a2016-03-01 14:18:18 +08001744RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001745 MemoryRegion *mr, Error **errp)
1746{
1747 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1748}
1749
Fam Zheng528f46a2016-03-01 14:18:18 +08001750RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001751{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001752 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1753}
1754
Fam Zheng528f46a2016-03-01 14:18:18 +08001755RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001756 void (*resized)(const char*,
1757 uint64_t length,
1758 void *host),
1759 MemoryRegion *mr, Error **errp)
1760{
1761 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001762}
bellarde9a1ab12007-02-08 23:08:38 +00001763
Paolo Bonzini43771532013-09-09 17:58:40 +02001764static void reclaim_ramblock(RAMBlock *block)
1765{
1766 if (block->flags & RAM_PREALLOC) {
1767 ;
1768 } else if (xen_enabled()) {
1769 xen_invalidate_map_cache_entry(block->host);
1770#ifndef _WIN32
1771 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001772 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001773 close(block->fd);
1774#endif
1775 } else {
1776 qemu_anon_ram_free(block->host, block->max_length);
1777 }
1778 g_free(block);
1779}
1780
Fam Zhengf1060c52016-03-01 14:18:22 +08001781void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001782{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001783 if (!block) {
1784 return;
1785 }
1786
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001787 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001788 QLIST_REMOVE_RCU(block, next);
1789 ram_list.mru_block = NULL;
1790 /* Write list before version */
1791 smp_wmb();
1792 ram_list.version++;
1793 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001794 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001795}
1796
Huang Yingcd19cfa2011-03-02 08:56:19 +01001797#ifndef _WIN32
1798void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1799{
1800 RAMBlock *block;
1801 ram_addr_t offset;
1802 int flags;
1803 void *area, *vaddr;
1804
Mike Day0dc3f442013-09-05 14:41:35 -04001805 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001806 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001807 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001808 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001809 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001810 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001811 } else if (xen_enabled()) {
1812 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001813 } else {
1814 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001815 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001816 flags |= (block->flags & RAM_SHARED ?
1817 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001818 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1819 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001820 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001821 /*
1822 * Remap needs to match alloc. Accelerators that
1823 * set phys_mem_alloc never remap. If they did,
1824 * we'd need a remap hook here.
1825 */
1826 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1827
Huang Yingcd19cfa2011-03-02 08:56:19 +01001828 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1829 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1830 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001831 }
1832 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001833 fprintf(stderr, "Could not remap addr: "
1834 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001835 length, addr);
1836 exit(1);
1837 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001838 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001839 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001840 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001841 }
1842 }
1843}
1844#endif /* !_WIN32 */
1845
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001846/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001847 * This should not be used for general purpose DMA. Use address_space_map
1848 * or address_space_rw instead. For local memory (e.g. video ram) that the
1849 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001850 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001851 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001852 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001853void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001854{
Gonglei3655cb92016-02-20 10:35:20 +08001855 RAMBlock *block = ram_block;
1856
1857 if (block == NULL) {
1858 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001859 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001860 }
Mike Dayae3a7042013-09-05 14:41:35 -04001861
1862 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001863 /* We need to check if the requested address is in the RAM
1864 * because we don't want to map the entire memory in QEMU.
1865 * In that case just map until the end of the page.
1866 */
1867 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001868 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001869 }
Mike Dayae3a7042013-09-05 14:41:35 -04001870
1871 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001872 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001873 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001874}
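/* Illustrative usage (a sketch): the caller provides the RCU critical
 * section; block may be NULL, in which case it is looked up from addr.
 * Under Xen, only the page containing addr may actually be mapped:
 *
 *     rcu_read_lock();
 *     void *host = qemu_map_ram_ptr(block, addr);
 *     ...access memory at host...
 *     rcu_read_unlock();
 */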
1875
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001876/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001877 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001878 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001879 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001880 */
Gonglei3655cb92016-02-20 10:35:20 +08001881static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1882 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001883{
Gonglei3655cb92016-02-20 10:35:20 +08001884 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001885 if (*size == 0) {
1886 return NULL;
1887 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001888
Gonglei3655cb92016-02-20 10:35:20 +08001889 if (block == NULL) {
1890 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001891 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001892 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001893 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001894
1895 if (xen_enabled() && block->host == NULL) {
1896 /* We need to check if the requested address is in the RAM
1897 * because we don't want to map the entire memory in QEMU.
1898 * In that case just map the requested area.
1899 */
1900 if (block->offset == 0) {
1901 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001902 }
1903
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001904 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001905 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001906
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001907 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001908}
1909
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001910/*
1911 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1912 * in that RAMBlock.
1913 *
1914 * ptr: Host pointer to look up
1915 * round_offset: If true round the result offset down to a page boundary
1916 * *ram_addr: set to result ram_addr
1917 * *offset: set to result offset within the RAMBlock
1918 *
1919 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001920 *
1921 * By the time this function returns, the returned pointer is not protected
1922 * by RCU anymore. If the caller is not within an RCU critical section and
1923 * does not hold the iothread lock, it must have other means of protecting the
1924 * pointer, such as a reference to the region that includes the incoming
1925 * ram_addr_t.
1926 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001927RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001928 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001929{
pbrook94a6b542009-04-11 17:15:54 +00001930 RAMBlock *block;
1931 uint8_t *host = ptr;
1932
Jan Kiszka868bb332011-06-21 22:59:09 +02001933 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001934 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001935 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001936 ram_addr = xen_ram_addr_from_mapcache(ptr);
1937 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001939 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001940 }
Mike Day0dc3f442013-09-05 14:41:35 -04001941 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001942 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001943 }
1944
Mike Day0dc3f442013-09-05 14:41:35 -04001945 rcu_read_lock();
1946 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001947 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001948 goto found;
1949 }
1950
Mike Day0dc3f442013-09-05 14:41:35 -04001951 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001952 /* This case happens when the block is not mapped. */
1953 if (block->host == NULL) {
1954 continue;
1955 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001956 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001957 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001958 }
pbrook94a6b542009-04-11 17:15:54 +00001959 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001960
Mike Day0dc3f442013-09-05 14:41:35 -04001961 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001962 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001963
1964found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001965 *offset = (host - block->host);
1966 if (round_offset) {
1967 *offset &= TARGET_PAGE_MASK;
1968 }
Mike Day0dc3f442013-09-05 14:41:35 -04001969 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001970 return block;
1971}
1972
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001973/*
1974 * Finds the named RAMBlock
1975 *
1976 * name: The name of RAMBlock to find
1977 *
1978 * Returns: RAMBlock (or NULL if not found)
1979 */
1980RAMBlock *qemu_ram_block_by_name(const char *name)
1981{
1982 RAMBlock *block;
1983
1984 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1985 if (!strcmp(name, block->idstr)) {
1986 return block;
1987 }
1988 }
1989
1990 return NULL;
1991}
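/* Illustrative usage (a sketch; the block name is hypothetical, and the
 * caller is assumed to hold rcu_read_lock() or the iothread lock to keep
 * the block list stable):
 *
 *     RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
 *     if (rb) {
 *         ...use qemu_ram_get_idstr(rb), etc....
 *     }
 */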
1992
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001993/* Some of the softmmu routines need to translate from a host pointer
1994 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001995ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001996{
1997 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001998 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001999
Paolo Bonzinif615f392016-05-26 10:07:50 +02002000 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002001 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002002 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002003 }
2004
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002005 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002006}
Alex Williamsonf471a172010-06-11 11:11:42 -06002007
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002008/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002009static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002010 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002011{
Juan Quintela52159192013-10-08 12:44:04 +02002012 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002013 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002014 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002015 switch (size) {
2016 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002017 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002018 break;
2019 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002020 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002021 break;
2022 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002023 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002024 break;
2025 default:
2026 abort();
2027 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002028 /* Set both VGA and migration bits for simplicity and to remove
2029 * the notdirty callback faster.
2030 */
2031 cpu_physical_memory_set_dirty_range(ram_addr, size,
2032 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002033 /* we remove the notdirty callback only if the code has been
2034 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002035 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002036 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002037 }
bellard1ccde1c2004-02-06 19:46:14 +00002038}
2039
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002040static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2041 unsigned size, bool is_write)
2042{
2043 return is_write;
2044}
2045
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002046static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002047 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002048 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002049 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002050};
2051
pbrook0f459d12008-06-09 00:20:13 +00002052/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002053static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002054{
Andreas Färber93afead2013-08-26 03:41:01 +02002055 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002056 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002057 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002058 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002059 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002060 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002061 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002062
Andreas Färberff4700b2013-08-26 18:23:18 +02002063 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002064 /* We re-entered the check after replacing the TB. Now raise
 2065 * the debug interrupt so that it will trigger after the
2066 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002067 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002068 return;
2069 }
Andreas Färber93afead2013-08-26 03:41:01 +02002070 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002071 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002072 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2073 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002074 if (flags == BP_MEM_READ) {
2075 wp->flags |= BP_WATCHPOINT_HIT_READ;
2076 } else {
2077 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2078 }
2079 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002080 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002081 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002082 if (wp->flags & BP_CPU &&
2083 !cc->debug_check_watchpoint(cpu, wp)) {
2084 wp->flags &= ~BP_WATCHPOINT_HIT;
2085 continue;
2086 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002087 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002088 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002089 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002090 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002091 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002092 } else {
2093 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002094 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002095 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002096 }
aliguori06d55cc2008-11-18 20:24:06 +00002097 }
aliguori6e140f22008-11-18 20:37:55 +00002098 } else {
2099 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002100 }
2101 }
2102}
2103
pbrook6658ffb2007-03-16 23:58:11 +00002104/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2105 so these check for a hit then pass through to the normal out-of-line
2106 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002107static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2108 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002109{
Peter Maydell66b9b432015-04-26 16:49:24 +01002110 MemTxResult res;
2111 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002112 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2113 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002114
Peter Maydell66b9b432015-04-26 16:49:24 +01002115 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002116 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002117 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002118 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002119 break;
2120 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002121 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002122 break;
2123 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002124 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002125 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002126 default: abort();
2127 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002128 *pdata = data;
2129 return res;
2130}
2131
2132static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2133 uint64_t val, unsigned size,
2134 MemTxAttrs attrs)
2135{
2136 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002137 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2138 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002139
2140 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2141 switch (size) {
2142 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002143 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002144 break;
2145 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002146 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002147 break;
2148 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002149 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002150 break;
2151 default: abort();
2152 }
2153 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002154}
2155
Avi Kivity1ec9b902012-01-02 12:47:48 +02002156static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002157 .read_with_attrs = watch_mem_read,
2158 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002159 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002160};
pbrook6658ffb2007-03-16 23:58:11 +00002161
Peter Maydellf25a49e2015-04-26 16:49:24 +01002162static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2163 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002164{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002165 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002166 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002167 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002168
blueswir1db7b5422007-05-26 17:36:03 +00002169#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002170 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002171 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002172#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002173 res = address_space_read(subpage->as, addr + subpage->base,
2174 attrs, buf, len);
2175 if (res) {
2176 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002177 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002178 switch (len) {
2179 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002180 *data = ldub_p(buf);
2181 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002182 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002183 *data = lduw_p(buf);
2184 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002185 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002186 *data = ldl_p(buf);
2187 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002188 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002189 *data = ldq_p(buf);
2190 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002191 default:
2192 abort();
2193 }
blueswir1db7b5422007-05-26 17:36:03 +00002194}
2195
Peter Maydellf25a49e2015-04-26 16:49:24 +01002196static MemTxResult subpage_write(void *opaque, hwaddr addr,
2197 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002198{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002199 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002200 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002201
blueswir1db7b5422007-05-26 17:36:03 +00002202#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002203 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002204 " value %"PRIx64"\n",
2205 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002206#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002207 switch (len) {
2208 case 1:
2209 stb_p(buf, value);
2210 break;
2211 case 2:
2212 stw_p(buf, value);
2213 break;
2214 case 4:
2215 stl_p(buf, value);
2216 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002217 case 8:
2218 stq_p(buf, value);
2219 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002220 default:
2221 abort();
2222 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002223 return address_space_write(subpage->as, addr + subpage->base,
2224 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002225}
2226
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002227static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002228 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002229{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002230 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002231#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002232 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002233 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002234#endif
2235
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002236 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002237 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002238}
2239
Avi Kivity70c68e42012-01-02 12:32:48 +02002240static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002241 .read_with_attrs = subpage_read,
2242 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002243 .impl.min_access_size = 1,
2244 .impl.max_access_size = 8,
2245 .valid.min_access_size = 1,
2246 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002247 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002248 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002249};
2250
Anthony Liguoric227f092009-10-01 16:12:16 -05002251static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002252 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002253{
2254 int idx, eidx;
2255
2256 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2257 return -1;
2258 idx = SUBPAGE_IDX(start);
2259 eidx = SUBPAGE_IDX(end);
2260#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002261 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2262 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002263#endif
blueswir1db7b5422007-05-26 17:36:03 +00002264 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002265 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002266 }
2267
2268 return 0;
2269}
2270
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002271static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002272{
Anthony Liguoric227f092009-10-01 16:12:16 -05002273 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002274
Anthony Liguori7267c092011-08-20 22:09:37 -05002275 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002276
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002277 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002278 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002279 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002280 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002281 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002282#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002283 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2284 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002285#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002286 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002287
2288 return mmio;
2289}
2290
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002291static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2292 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002293{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002294 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002295 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002296 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002297 .mr = mr,
2298 .offset_within_address_space = 0,
2299 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002300 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002301 };
2302
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002303 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002304}
2305
Peter Maydella54c87b2016-01-21 14:15:05 +00002306MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002307{
Peter Maydella54c87b2016-01-21 14:15:05 +00002308 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2309 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002310 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002311 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002312
2313 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002314}
2315
Avi Kivitye9179ce2009-06-14 11:38:52 +03002316static void io_mem_init(void)
2317{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002318 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002319 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002320 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002321 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002322 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002323 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002324 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002325}
2326
Avi Kivityac1970f2012-10-03 16:22:53 +02002327static void mem_begin(MemoryListener *listener)
2328{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002329 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002330 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2331 uint16_t n;
2332
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002333 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002334 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002335 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002336 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002337 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002338 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002339 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002340 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002341
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002342 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002343 d->as = as;
2344 as->next_dispatch = d;
2345}
2346
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002347static void address_space_dispatch_free(AddressSpaceDispatch *d)
2348{
2349 phys_sections_free(&d->map);
2350 g_free(d);
2351}
2352
Paolo Bonzini00752702013-05-29 12:13:54 +02002353static void mem_commit(MemoryListener *listener)
2354{
2355 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002356 AddressSpaceDispatch *cur = as->dispatch;
2357 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002358
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002359 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002360
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002361 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002362 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002363 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002364 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002365}
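/*
 * Editor's sketch: the publish/reclaim pattern used by mem_commit(),
 * reduced to its essentials. Readers dereference the pointer inside an
 * RCU critical section; a writer publishes a replacement with
 * atomic_rcu_set() and lets call_rcu() free the old copy only after all
 * in-flight readers have finished. "DemoTable" and friends are
 * hypothetical; the primitives are the ones used above (qemu/rcu.h).
 */
typedef struct DemoTable {
    struct rcu_head rcu;
    int data;
} DemoTable;

static DemoTable *demo_table;

static void demo_table_free(DemoTable *t)
{
    g_free(t);
}

static void demo_table_replace(int data)
{
    DemoTable *next = g_new0(DemoTable, 1);
    DemoTable *cur = demo_table;

    next->data = data;
    atomic_rcu_set(&demo_table, next);       /* new readers see "next" */
    if (cur) {
        call_rcu(cur, demo_table_free, rcu); /* freed after grace period */
    }
}

static int demo_table_read(void)
{
    int val;

    rcu_read_lock();
    val = atomic_rcu_read(&demo_table)->data;
    rcu_read_unlock();
    return val;
}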
2366
Avi Kivity1d711482012-10-02 18:54:45 +02002367static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002368{
Peter Maydell32857f42015-10-01 15:29:50 +01002369 CPUAddressSpace *cpuas;
2370 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002371
2372 /* Since each CPU stores RAM addresses in its TLB cache, we must
2373 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002374 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2375 cpu_reloading_memory_map();
2376 /* The CPU and TLB are protected by the iothread lock.
2377 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2378 * may have split the RCU critical section.
2379 */
2380 d = atomic_rcu_read(&cpuas->as->dispatch);
2381 cpuas->memory_dispatch = d;
2382 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002383}
2384
Avi Kivityac1970f2012-10-03 16:22:53 +02002385void address_space_init_dispatch(AddressSpace *as)
2386{
Paolo Bonzini00752702013-05-29 12:13:54 +02002387 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002388 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002389 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002390 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002391 .region_add = mem_add,
2392 .region_nop = mem_add,
2393 .priority = 0,
2394 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002395 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002396}
2397
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002398void address_space_unregister(AddressSpace *as)
2399{
2400 memory_listener_unregister(&as->dispatch_listener);
2401}
2402
Avi Kivity83f3c252012-10-07 12:59:55 +02002403void address_space_destroy_dispatch(AddressSpace *as)
2404{
2405 AddressSpaceDispatch *d = as->dispatch;
2406
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002407 atomic_rcu_set(&as->dispatch, NULL);
2408 if (d) {
2409 call_rcu(d, address_space_dispatch_free, rcu);
2410 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002411}
2412
Avi Kivity62152b82011-07-26 14:26:14 +03002413static void memory_map_init(void)
2414{
Anthony Liguori7267c092011-08-20 22:09:37 -05002415 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002416
Paolo Bonzini57271d62013-11-07 17:14:37 +01002417 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002418 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002419
Anthony Liguori7267c092011-08-20 22:09:37 -05002420 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002421 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2422 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002423 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002424}
2425
2426MemoryRegion *get_system_memory(void)
2427{
2428 return system_memory;
2429}
2430
Avi Kivity309cb472011-08-08 16:09:03 +03002431MemoryRegion *get_system_io(void)
2432{
2433 return system_io;
2434}
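/*
 * Editor's sketch: how board code typically consumes get_system_memory().
 * A RAM region is created and mapped at a guest-physical offset; the name,
 * the 1 MiB size and the 0x40000000 base are illustrative only, and
 * migration setup (vmstate registration) is omitted.
 */
static void demo_map_ram(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "demo.ram", 1024 * 1024, &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
}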
2435
pbrooke2eef172008-06-08 01:09:01 +00002436#endif /* !defined(CONFIG_USER_ONLY) */
2437
bellard13eb76e2004-01-24 15:23:36 +00002438/* physical memory access (slow version, mainly for debug) */
2439#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002440int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002441 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002442{
2443 int l, flags;
2444 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002445 void *p;
bellard13eb76e2004-01-24 15:23:36 +00002446
2447 while (len > 0) {
2448 page = addr & TARGET_PAGE_MASK;
2449 l = (page + TARGET_PAGE_SIZE) - addr;
2450 if (l > len)
2451 l = len;
2452 flags = page_get_flags(page);
2453 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002454 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002455 if (is_write) {
2456 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002457 return -1;
bellard579a97f2007-11-11 14:26:47 +00002458 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002459 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002460 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002461 memcpy(p, buf, l);
2462 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002463 } else {
2464 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002465 return -1;
bellard579a97f2007-11-11 14:26:47 +00002466 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002467 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002468 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002469 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002470 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002471 }
2472 len -= l;
2473 buf += l;
2474 addr += l;
2475 }
Paul Brooka68fe892010-03-01 00:08:59 +00002476 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002477}
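/*
 * Editor's sketch: a debugger-style peek built on the helper above; the
 * gdbstub uses the same call so that a bad guest address fails cleanly
 * instead of faulting. "demo_peek_u32" is a hypothetical name.
 */
static bool demo_peek_u32(CPUState *cpu, target_ulong addr, uint32_t *val)
{
    return cpu_memory_rw_debug(cpu, addr, (uint8_t *)val,
                               sizeof(*val), 0 /* is_write */) == 0;
}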
bellard8df1cd02005-01-28 22:37:22 +00002478
bellard13eb76e2004-01-24 15:23:36 +00002479#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002480
Paolo Bonzini845b6212015-03-23 11:45:53 +01002481static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002482 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002483{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002484 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002485 addr += memory_region_get_ram_addr(mr);
2486
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002487 /* No early return if dirty_log_mask is or becomes 0, because
2488 * cpu_physical_memory_set_dirty_range will still call
2489 * xen_modified_memory.
2490 */
2491 if (dirty_log_mask) {
2492 dirty_log_mask =
2493 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002494 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002495 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2496 tb_invalidate_phys_range(addr, addr + length);
2497 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2498 }
2499 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002500}
2501
Richard Henderson23326162013-07-08 14:55:59 -07002502static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002503{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002504 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002505
2506 /* Regions are assumed to support 1-4 byte accesses unless
2507 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002508 if (access_size_max == 0) {
2509 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002510 }
Richard Henderson23326162013-07-08 14:55:59 -07002511
2512 /* Bound the maximum access by the alignment of the address. */
2513 if (!mr->ops->impl.unaligned) {
2514 unsigned align_size_max = addr & -addr;
2515 if (align_size_max != 0 && align_size_max < access_size_max) {
2516 access_size_max = align_size_max;
2517 }
2518 }
2519
2520 /* Don't attempt accesses larger than the maximum. */
2521 if (l > access_size_max) {
2522 l = access_size_max;
2523 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002524 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002525
2526 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002527}
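/*
 * Editor's note, a worked example of the clamping above: for a region with
 * valid.max_access_size == 8 and no unaligned support, a 6-byte access at
 * address 0x1004 is first bounded by the address alignment
 * (0x1004 & -0x1004 == 4) and then rounded down to a power of two:
 * min(6, 8, 4) -> pow2floor -> 4. The caller loops and issues the
 * remaining 2 bytes as a separate access at 0x1008.
 */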
2528
Jan Kiszka4840f102015-06-18 18:47:22 +02002529static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002530{
Jan Kiszka4840f102015-06-18 18:47:22 +02002531 bool unlocked = !qemu_mutex_iothread_locked();
2532 bool release_lock = false;
2533
2534 if (unlocked && mr->global_locking) {
2535 qemu_mutex_lock_iothread();
2536 unlocked = false;
2537 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002538 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002539 if (mr->flush_coalesced_mmio) {
2540 if (unlocked) {
2541 qemu_mutex_lock_iothread();
2542 }
2543 qemu_flush_coalesced_mmio_buffer();
2544 if (unlocked) {
2545 qemu_mutex_unlock_iothread();
2546 }
2547 }
2548
2549 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002550}
2551
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002552/* Called within RCU critical section. */
2553static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2554 MemTxAttrs attrs,
2555 const uint8_t *buf,
2556 int len, hwaddr addr1,
2557 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002558{
bellard13eb76e2004-01-24 15:23:36 +00002559 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002560 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002561 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002562 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002563
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002564 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002565 if (!memory_access_is_direct(mr, true)) {
2566 release_lock |= prepare_mmio_access(mr);
2567 l = memory_access_size(mr, l, addr1);
2568 /* XXX: could force current_cpu to NULL to avoid
2569 potential bugs */
2570 switch (l) {
2571 case 8:
2572 /* 64 bit write access */
2573 val = ldq_p(buf);
2574 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2575 attrs);
2576 break;
2577 case 4:
2578 /* 32 bit write access */
2579 val = ldl_p(buf);
2580 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2581 attrs);
2582 break;
2583 case 2:
2584 /* 16 bit write access */
2585 val = lduw_p(buf);
2586 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2587 attrs);
2588 break;
2589 case 1:
2590 /* 8 bit write access */
2591 val = ldub_p(buf);
2592 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2593 attrs);
2594 break;
2595 default:
2596 abort();
bellard13eb76e2004-01-24 15:23:36 +00002597 }
2598 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002599 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002600 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002601 memcpy(ptr, buf, l);
2602 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002603 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002604
2605 if (release_lock) {
2606 qemu_mutex_unlock_iothread();
2607 release_lock = false;
2608 }
2609
bellard13eb76e2004-01-24 15:23:36 +00002610 len -= l;
2611 buf += l;
2612 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002613
2614 if (!len) {
2615 break;
2616 }
2617
2618 l = len;
2619 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002620 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002621
Peter Maydell3b643492015-04-26 16:49:23 +01002622 return result;
bellard13eb76e2004-01-24 15:23:36 +00002623}
bellard8df1cd02005-01-28 22:37:22 +00002624
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002625MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2626 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002627{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002628 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002629 hwaddr addr1;
2630 MemoryRegion *mr;
2631 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002632
2633 if (len > 0) {
2634 rcu_read_lock();
2635 l = len;
2636 mr = address_space_translate(as, addr, &addr1, &l, true);
2637 result = address_space_write_continue(as, addr, attrs, buf, len,
2638 addr1, l, mr);
2639 rcu_read_unlock();
2640 }
2641
2642 return result;
2643}
2644
2645/* Called within RCU critical section. */
2646MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2647 MemTxAttrs attrs, uint8_t *buf,
2648 int len, hwaddr addr1, hwaddr l,
2649 MemoryRegion *mr)
2650{
2651 uint8_t *ptr;
2652 uint64_t val;
2653 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002654 bool release_lock = false;
2655
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002656 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002657 if (!memory_access_is_direct(mr, false)) {
2658 /* I/O case */
2659 release_lock |= prepare_mmio_access(mr);
2660 l = memory_access_size(mr, l, addr1);
2661 switch (l) {
2662 case 8:
2663 /* 64 bit read access */
2664 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2665 attrs);
2666 stq_p(buf, val);
2667 break;
2668 case 4:
2669 /* 32 bit read access */
2670 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2671 attrs);
2672 stl_p(buf, val);
2673 break;
2674 case 2:
2675 /* 16 bit read access */
2676 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2677 attrs);
2678 stw_p(buf, val);
2679 break;
2680 case 1:
2681 /* 8 bit read access */
2682 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2683 attrs);
2684 stb_p(buf, val);
2685 break;
2686 default:
2687 abort();
2688 }
2689 } else {
2690 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002691 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002692 memcpy(buf, ptr, l);
2693 }
2694
2695 if (release_lock) {
2696 qemu_mutex_unlock_iothread();
2697 release_lock = false;
2698 }
2699
2700 len -= l;
2701 buf += l;
2702 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002703
2704 if (!len) {
2705 break;
2706 }
2707
2708 l = len;
2709 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002710 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002711
2712 return result;
2713}
2714
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002715MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2716 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002717{
2718 hwaddr l;
2719 hwaddr addr1;
2720 MemoryRegion *mr;
2721 MemTxResult result = MEMTX_OK;
2722
2723 if (len > 0) {
2724 rcu_read_lock();
2725 l = len;
2726 mr = address_space_translate(as, addr, &addr1, &l, false);
2727 result = address_space_read_continue(as, addr, attrs, buf, len,
2728 addr1, l, mr);
2729 rcu_read_unlock();
2730 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002731
2732 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002733}
2734
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002735MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2736 uint8_t *buf, int len, bool is_write)
2737{
2738 if (is_write) {
2739 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2740 } else {
2741 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2742 }
2743}
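/*
 * Editor's sketch: a typical device-model use of the API above. Errors
 * surface through MemTxResult instead of aborting; the address and the
 * pattern written here are illustrative.
 */
static bool demo_dma_roundtrip(AddressSpace *as, hwaddr addr)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    if (address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                            out, sizeof(out)) != MEMTX_OK) {
        return false;           /* e.g. the write hit an unassigned region */
    }
    return address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                              in, sizeof(in)) == MEMTX_OK &&
           memcmp(in, out, sizeof(in)) == 0;
}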
Avi Kivityac1970f2012-10-03 16:22:53 +02002744
Avi Kivitya8170e52012-10-23 12:30:10 +02002745void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002746 int len, int is_write)
2747{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002748 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2749 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002750}
2751
Alexander Graf582b55a2013-12-11 14:17:44 +01002752enum write_rom_type {
2753 WRITE_DATA,
2754 FLUSH_CACHE,
2755};
2756
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002757static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002758 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002759{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002760 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002761 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002762 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002763 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002764
Paolo Bonzini41063e12015-03-18 14:21:43 +01002765 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002766 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002767 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002768 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002769
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002770 if (!(memory_region_is_ram(mr) ||
2771 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002772 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002773 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002774 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002775 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002776 switch (type) {
2777 case WRITE_DATA:
2778 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002779 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002780 break;
2781 case FLUSH_CACHE:
2782 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2783 break;
2784 }
bellardd0ecd2a2006-04-23 17:14:48 +00002785 }
2786 len -= l;
2787 buf += l;
2788 addr += l;
2789 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002790 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002791}
2792
Alexander Graf582b55a2013-12-11 14:17:44 +01002793/* Used for ROM loading: can write to RAM as well as ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002794void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002795 const uint8_t *buf, int len)
2796{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002797 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002798}
2799
2800void cpu_flush_icache_range(hwaddr start, int len)
2801{
2802 /*
2803 * This function should do the same thing as an icache flush that was
2804 * triggered from within the guest. For TCG we are always cache coherent,
2805 * so there is no need to flush anything. For KVM / Xen we need to flush
2806 * the host's instruction cache at least.
2807 */
2808 if (tcg_enabled()) {
2809 return;
2810 }
2811
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002812 cpu_physical_memory_write_rom_internal(&address_space_memory,
2813 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002814}
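/*
 * Editor's sketch: firmware loaders go through
 * cpu_physical_memory_write_rom() so the copy lands even in regions that
 * reject ordinary guest stores (ROM, romd devices). The reset-vector-style
 * address used here is illustrative.
 */
static void demo_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, size);
}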
2815
aliguori6d16c2f2009-01-22 16:59:11 +00002816typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002817 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002818 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002819 hwaddr addr;
2820 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002821 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002822} BounceBuffer;
2823
2824static BounceBuffer bounce;
2825
aliguoriba223c22009-01-22 16:59:16 +00002826typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002827 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002828 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002829} MapClient;
2830
Fam Zheng38e047b2015-03-16 17:03:35 +08002831QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002832static QLIST_HEAD(map_client_list, MapClient) map_client_list
2833 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002834
Fam Zhenge95205e2015-03-16 17:03:37 +08002835static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002836{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002837 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002838 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002839}
2840
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002841static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002842{
2843 MapClient *client;
2844
Blue Swirl72cf2d42009-09-12 07:36:22 +00002845 while (!QLIST_EMPTY(&map_client_list)) {
2846 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002847 qemu_bh_schedule(client->bh);
2848 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002849 }
2850}
2851
Fam Zhenge95205e2015-03-16 17:03:37 +08002852void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002853{
2854 MapClient *client = g_malloc(sizeof(*client));
2855
Fam Zheng38e047b2015-03-16 17:03:35 +08002856 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002857 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002858 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002859 if (!atomic_read(&bounce.in_use)) {
2860 cpu_notify_map_clients_locked();
2861 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002862 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002863}
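/*
 * Editor's sketch: the retry protocol the map-client list implements.
 * When address_space_map() (below) fails because the single bounce buffer
 * is in use, the caller parks a bottom half on the list and retries from
 * it once the buffer is released. "DemoMapJob" is hypothetical; job->bh
 * would be created with qemu_bh_new(demo_try_map, job).
 */
typedef struct DemoMapJob {
    QEMUBH *bh;
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
} DemoMapJob;

static void demo_try_map(void *opaque)
{
    DemoMapJob *job = opaque;
    hwaddr len = job->len;
    void *p = address_space_map(job->as, job->addr, &len, true);

    if (!p) {
        cpu_register_map_client(job->bh); /* run again when a map may work */
        return;
    }
    /* ... fill the mapping here, then release it: */
    address_space_unmap(job->as, p, len, true, len);
}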
2864
Fam Zheng38e047b2015-03-16 17:03:35 +08002865void cpu_exec_init_all(void)
2866{
2867 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002868 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002869 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002870 qemu_mutex_init(&map_client_list_lock);
2871}
2872
Fam Zhenge95205e2015-03-16 17:03:37 +08002873void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002874{
Fam Zhenge95205e2015-03-16 17:03:37 +08002875 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002876
Fam Zhenge95205e2015-03-16 17:03:37 +08002877 qemu_mutex_lock(&map_client_list_lock);
2878 QLIST_FOREACH(client, &map_client_list, link) {
2879 if (client->bh == bh) {
2880 cpu_unregister_map_client_do(client);
2881 break;
2882 }
2883 }
2884 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002885}
2886
2887static void cpu_notify_map_clients(void)
2888{
Fam Zheng38e047b2015-03-16 17:03:35 +08002889 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002890 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002891 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002892}
2893
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002894bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2895{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002896 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002897 hwaddr l, xlat;
2898
Paolo Bonzini41063e12015-03-18 14:21:43 +01002899 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002900 while (len > 0) {
2901 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002902 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2903 if (!memory_access_is_direct(mr, is_write)) {
2904 l = memory_access_size(mr, l, addr);
2905 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002906 rcu_read_unlock();
 return false;
2907 }
2908 }
2909
2910 len -= l;
2911 addr += l;
2912 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002913 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002914 return true;
2915}
2916
aliguori6d16c2f2009-01-22 16:59:11 +00002917/* Map a physical memory region into a host virtual address.
2918 * May map a subset of the requested range, given by and returned in *plen.
2919 * May return NULL if resources needed to perform the mapping are exhausted.
2920 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002921 * Use cpu_register_map_client() to know when retrying the map operation is
2922 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002923 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002924void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002925 hwaddr addr,
2926 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002927 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002928{
Avi Kivitya8170e52012-10-23 12:30:10 +02002929 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002930 hwaddr done = 0;
2931 hwaddr l, xlat, base;
2932 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002933 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002934
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002935 if (len == 0) {
2936 return NULL;
2937 }
aliguori6d16c2f2009-01-22 16:59:11 +00002938
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002939 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002940 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002941 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002942
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002943 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002944 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002945 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002946 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002947 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002948 /* Avoid unbounded allocations */
2949 l = MIN(l, TARGET_PAGE_SIZE);
2950 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002951 bounce.addr = addr;
2952 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002953
2954 memory_region_ref(mr);
2955 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002956 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002957 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2958 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002959 }
aliguori6d16c2f2009-01-22 16:59:11 +00002960
Paolo Bonzini41063e12015-03-18 14:21:43 +01002961 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002962 *plen = l;
2963 return bounce.buffer;
2964 }
2965
2966 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002967
2968 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002969 len -= l;
2970 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002971 done += l;
2972 if (len == 0) {
2973 break;
2974 }
2975
2976 l = len;
2977 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2978 if (this_mr != mr || xlat != base + done) {
2979 break;
2980 }
aliguori6d16c2f2009-01-22 16:59:11 +00002981 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002982
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002983 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002984 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002985 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002986 rcu_read_unlock();
2987
2988 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002989}
2990
Avi Kivityac1970f2012-10-03 16:22:53 +02002991/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002992 * Will also mark the memory as dirty if is_write == 1. access_len gives
2993 * the amount of memory that was actually read or written by the caller.
2994 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002995void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2996 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002997{
2998 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002999 MemoryRegion *mr;
3000 ram_addr_t addr1;
3001
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01003002 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003003 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003004 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003005 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003006 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003007 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003008 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003009 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003010 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003011 return;
3012 }
3013 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003014 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3015 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003016 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003017 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003018 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003019 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003020 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003021 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003022}
bellardd0ecd2a2006-04-23 17:14:48 +00003023
Avi Kivitya8170e52012-10-23 12:30:10 +02003024void *cpu_physical_memory_map(hwaddr addr,
3025 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003026 int is_write)
3027{
3028 return address_space_map(&address_space_memory, addr, plen, is_write);
3029}
3030
Avi Kivitya8170e52012-10-23 12:30:10 +02003031void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3032 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003033{
3034 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3035}
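/*
 * Editor's sketch: the zero-copy pattern address_space_map()/unmap() is
 * designed for. *plen may come back smaller than requested, so real
 * callers loop; this minimal version only touches whatever the first
 * mapping covers and reports how much that was.
 */
static hwaddr demo_fill(AddressSpace *as, hwaddr addr, hwaddr len,
                        uint8_t pattern)
{
    hwaddr mapped = len;
    void *p = address_space_map(as, addr, &mapped, true);

    if (!p) {
        return 0;              /* bounce buffer busy; retry via map client */
    }
    memset(p, pattern, mapped);
    address_space_unmap(as, p, mapped, true, mapped);
    return mapped;             /* may be smaller than len */
}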
3036
bellard8df1cd02005-01-28 22:37:22 +00003037/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003038static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3039 MemTxAttrs attrs,
3040 MemTxResult *result,
3041 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003042{
bellard8df1cd02005-01-28 22:37:22 +00003043 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003044 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003045 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003046 hwaddr l = 4;
3047 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003048 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003049 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003050
Paolo Bonzini41063e12015-03-18 14:21:43 +01003051 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003052 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003053 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003054 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003055
bellard8df1cd02005-01-28 22:37:22 +00003056 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003057 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003058#if defined(TARGET_WORDS_BIGENDIAN)
3059 if (endian == DEVICE_LITTLE_ENDIAN) {
3060 val = bswap32(val);
3061 }
3062#else
3063 if (endian == DEVICE_BIG_ENDIAN) {
3064 val = bswap32(val);
3065 }
3066#endif
bellard8df1cd02005-01-28 22:37:22 +00003067 } else {
3068 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003069 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003070 switch (endian) {
3071 case DEVICE_LITTLE_ENDIAN:
3072 val = ldl_le_p(ptr);
3073 break;
3074 case DEVICE_BIG_ENDIAN:
3075 val = ldl_be_p(ptr);
3076 break;
3077 default:
3078 val = ldl_p(ptr);
3079 break;
3080 }
Peter Maydell50013112015-04-26 16:49:24 +01003081 r = MEMTX_OK;
3082 }
3083 if (result) {
3084 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003085 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003086 if (release_lock) {
3087 qemu_mutex_unlock_iothread();
3088 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003089 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003090 return val;
3091}
3092
Peter Maydell50013112015-04-26 16:49:24 +01003093uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3094 MemTxAttrs attrs, MemTxResult *result)
3095{
3096 return address_space_ldl_internal(as, addr, attrs, result,
3097 DEVICE_NATIVE_ENDIAN);
3098}
3099
3100uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3101 MemTxAttrs attrs, MemTxResult *result)
3102{
3103 return address_space_ldl_internal(as, addr, attrs, result,
3104 DEVICE_LITTLE_ENDIAN);
3105}
3106
3107uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3108 MemTxAttrs attrs, MemTxResult *result)
3109{
3110 return address_space_ldl_internal(as, addr, attrs, result,
3111 DEVICE_BIG_ENDIAN);
3112}
3113
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003114uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003115{
Peter Maydell50013112015-04-26 16:49:24 +01003116 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003117}
3118
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003119uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003120{
Peter Maydell50013112015-04-26 16:49:24 +01003121 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003122}
3123
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003124uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003125{
Peter Maydell50013112015-04-26 16:49:24 +01003126 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003127}
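/*
 * Editor's sketch: device code generally prefers the address_space_ldl*
 * variants because the ld*_phys wrappers above discard the MemTxResult.
 * Reading a little-endian descriptor word might look like this; the
 * helper name is hypothetical.
 */
static bool demo_read_desc_word(AddressSpace *as, hwaddr desc, uint32_t *val)
{
    MemTxResult r;

    *val = address_space_ldl_le(as, desc, MEMTXATTRS_UNSPECIFIED, &r);
    return r == MEMTX_OK;
}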
3128
bellard84b7b8e2005-11-28 21:19:04 +00003129/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003130static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3131 MemTxAttrs attrs,
3132 MemTxResult *result,
3133 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003134{
bellard84b7b8e2005-11-28 21:19:04 +00003135 uint8_t *ptr;
3136 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003137 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003138 hwaddr l = 8;
3139 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003140 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003141 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003142
Paolo Bonzini41063e12015-03-18 14:21:43 +01003143 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003144 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003145 false);
3146 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003147 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003148
bellard84b7b8e2005-11-28 21:19:04 +00003149 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003150 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003151#if defined(TARGET_WORDS_BIGENDIAN)
3152 if (endian == DEVICE_LITTLE_ENDIAN) {
3153 val = bswap64(val);
3154 }
3155#else
3156 if (endian == DEVICE_BIG_ENDIAN) {
3157 val = bswap64(val);
3158 }
3159#endif
bellard84b7b8e2005-11-28 21:19:04 +00003160 } else {
3161 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003162 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003163 switch (endian) {
3164 case DEVICE_LITTLE_ENDIAN:
3165 val = ldq_le_p(ptr);
3166 break;
3167 case DEVICE_BIG_ENDIAN:
3168 val = ldq_be_p(ptr);
3169 break;
3170 default:
3171 val = ldq_p(ptr);
3172 break;
3173 }
Peter Maydell50013112015-04-26 16:49:24 +01003174 r = MEMTX_OK;
3175 }
3176 if (result) {
3177 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003178 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003179 if (release_lock) {
3180 qemu_mutex_unlock_iothread();
3181 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003182 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003183 return val;
3184}
3185
Peter Maydell50013112015-04-26 16:49:24 +01003186uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3187 MemTxAttrs attrs, MemTxResult *result)
3188{
3189 return address_space_ldq_internal(as, addr, attrs, result,
3190 DEVICE_NATIVE_ENDIAN);
3191}
3192
3193uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3194 MemTxAttrs attrs, MemTxResult *result)
3195{
3196 return address_space_ldq_internal(as, addr, attrs, result,
3197 DEVICE_LITTLE_ENDIAN);
3198}
3199
3200uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3201 MemTxAttrs attrs, MemTxResult *result)
3202{
3203 return address_space_ldq_internal(as, addr, attrs, result,
3204 DEVICE_BIG_ENDIAN);
3205}
3206
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003207uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003208{
Peter Maydell50013112015-04-26 16:49:24 +01003209 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003210}
3211
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003212uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003213{
Peter Maydell50013112015-04-26 16:49:24 +01003214 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003215}
3216
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003217uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003218{
Peter Maydell50013112015-04-26 16:49:24 +01003219 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003220}
3221
bellardaab33092005-10-30 20:48:42 +00003222/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003223uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3224 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003225{
3226 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003227 MemTxResult r;
3228
3229 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3230 if (result) {
3231 *result = r;
3232 }
bellardaab33092005-10-30 20:48:42 +00003233 return val;
3234}
3235
Peter Maydell50013112015-04-26 16:49:24 +01003236uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3237{
3238 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3239}
3240
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003241/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003242static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3243 hwaddr addr,
3244 MemTxAttrs attrs,
3245 MemTxResult *result,
3246 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003247{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003248 uint8_t *ptr;
3249 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003250 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003251 hwaddr l = 2;
3252 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003253 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003254 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003255
Paolo Bonzini41063e12015-03-18 14:21:43 +01003256 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003257 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003258 false);
3259 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003260 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003261
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003262 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003263 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003264#if defined(TARGET_WORDS_BIGENDIAN)
3265 if (endian == DEVICE_LITTLE_ENDIAN) {
3266 val = bswap16(val);
3267 }
3268#else
3269 if (endian == DEVICE_BIG_ENDIAN) {
3270 val = bswap16(val);
3271 }
3272#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003273 } else {
3274 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003275 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003276 switch (endian) {
3277 case DEVICE_LITTLE_ENDIAN:
3278 val = lduw_le_p(ptr);
3279 break;
3280 case DEVICE_BIG_ENDIAN:
3281 val = lduw_be_p(ptr);
3282 break;
3283 default:
3284 val = lduw_p(ptr);
3285 break;
3286 }
Peter Maydell50013112015-04-26 16:49:24 +01003287 r = MEMTX_OK;
3288 }
3289 if (result) {
3290 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003291 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003292 if (release_lock) {
3293 qemu_mutex_unlock_iothread();
3294 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003295 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003296 return val;
bellardaab33092005-10-30 20:48:42 +00003297}
3298
Peter Maydell50013112015-04-26 16:49:24 +01003299uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3300 MemTxAttrs attrs, MemTxResult *result)
3301{
3302 return address_space_lduw_internal(as, addr, attrs, result,
3303 DEVICE_NATIVE_ENDIAN);
3304}
3305
3306uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3307 MemTxAttrs attrs, MemTxResult *result)
3308{
3309 return address_space_lduw_internal(as, addr, attrs, result,
3310 DEVICE_LITTLE_ENDIAN);
3311}
3312
3313uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3314 MemTxAttrs attrs, MemTxResult *result)
3315{
3316 return address_space_lduw_internal(as, addr, attrs, result,
3317 DEVICE_BIG_ENDIAN);
3318}
3319
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003320uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003321{
Peter Maydell50013112015-04-26 16:49:24 +01003322 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003323}
3324
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003325uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003326{
Peter Maydell50013112015-04-26 16:49:24 +01003327 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003328}
3329
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003330uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003331{
Peter Maydell50013112015-04-26 16:49:24 +01003332 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003333}
3334
bellard8df1cd02005-01-28 22:37:22 +00003335/* warning: addr must be aligned. The RAM page is not marked as dirty
3336 and the code inside is not invalidated. This is useful when the dirty
3337 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003338void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3339 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003340{
bellard8df1cd02005-01-28 22:37:22 +00003341 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003342 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003343 hwaddr l = 4;
3344 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003345 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003346 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003347 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003348
Paolo Bonzini41063e12015-03-18 14:21:43 +01003349 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003350 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003351 true);
3352 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003353 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003354
Peter Maydell50013112015-04-26 16:49:24 +01003355 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003356 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003357 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003358 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003359
Paolo Bonzini845b6212015-03-23 11:45:53 +01003360 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3361 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003362 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3363 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003364 r = MEMTX_OK;
3365 }
3366 if (result) {
3367 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003368 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003369 if (release_lock) {
3370 qemu_mutex_unlock_iothread();
3371 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003372 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003373}
3374
Peter Maydell50013112015-04-26 16:49:24 +01003375void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3376{
3377 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3378}
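/*
 * Editor's sketch: the notdirty store exists for MMU helpers that update
 * page-table entries in guest RAM. A plain stl_phys() would also
 * invalidate any translated code on the page for what is only a
 * bookkeeping write. The x86-style accessed bit (0x20) is just an example.
 */
static void demo_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                  uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* accessed */);
}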
3379
bellard8df1cd02005-01-28 22:37:22 +00003380/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003381static inline void address_space_stl_internal(AddressSpace *as,
3382 hwaddr addr, uint32_t val,
3383 MemTxAttrs attrs,
3384 MemTxResult *result,
3385 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003386{
bellard8df1cd02005-01-28 22:37:22 +00003387 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003388 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003389 hwaddr l = 4;
3390 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003391 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003392 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003393
Paolo Bonzini41063e12015-03-18 14:21:43 +01003394 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003395 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003396 true);
3397 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003398 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003399
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003400#if defined(TARGET_WORDS_BIGENDIAN)
3401 if (endian == DEVICE_LITTLE_ENDIAN) {
3402 val = bswap32(val);
3403 }
3404#else
3405 if (endian == DEVICE_BIG_ENDIAN) {
3406 val = bswap32(val);
3407 }
3408#endif
Peter Maydell50013112015-04-26 16:49:24 +01003409 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003410 } else {
bellard8df1cd02005-01-28 22:37:22 +00003411 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003412 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413 switch (endian) {
3414 case DEVICE_LITTLE_ENDIAN:
3415 stl_le_p(ptr, val);
3416 break;
3417 case DEVICE_BIG_ENDIAN:
3418 stl_be_p(ptr, val);
3419 break;
3420 default:
3421 stl_p(ptr, val);
3422 break;
3423 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003424 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003425 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003426 }
Peter Maydell50013112015-04-26 16:49:24 +01003427 if (result) {
3428 *result = r;
3429 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003430 if (release_lock) {
3431 qemu_mutex_unlock_iothread();
3432 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003433 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003434}
3435
3436void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3437 MemTxAttrs attrs, MemTxResult *result)
3438{
3439 address_space_stl_internal(as, addr, val, attrs, result,
3440 DEVICE_NATIVE_ENDIAN);
3441}
3442
3443void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3444 MemTxAttrs attrs, MemTxResult *result)
3445{
3446 address_space_stl_internal(as, addr, val, attrs, result,
3447 DEVICE_LITTLE_ENDIAN);
3448}
3449
3450void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3451 MemTxAttrs attrs, MemTxResult *result)
3452{
3453 address_space_stl_internal(as, addr, val, attrs, result,
3454 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003455}
3456
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003457void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003458{
Peter Maydell50013112015-04-26 16:49:24 +01003459 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003460}
3461
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003462void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003463{
Peter Maydell50013112015-04-26 16:49:24 +01003464 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003465}
3466
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003467void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003468{
Peter Maydell50013112015-04-26 16:49:24 +01003469 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003470}
3471
bellardaab33092005-10-30 20:48:42 +00003472/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003473void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3474 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003475{
3476 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003477 MemTxResult r;
3478
3479 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3480 if (result) {
3481 *result = r;
3482 }
3483}
3484
3485void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3486{
3487 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003488}
3489
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
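
/*
 * Note: like the byte store, and unlike the word/long fast paths, the
 * 64-bit stores above take the generic address_space_rw() path with a
 * pre-swapped value, which is what their "XXX: optimize" comment refers
 * to.  A usage sketch (illustrative only; desc_addr and desc_val are
 * hypothetical names for a guest physical address and a 64-bit value):
 *
 *     stq_le_phys(&address_space_memory, desc_addr, desc_val);
 */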

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
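
/*
 * Usage sketch (illustrative only): this is the routine the gdbstub and
 * the monitor use to access guest virtual memory without going through
 * the TLB.  Reading 16 bytes from a hypothetical guest virtual address
 * vaddr might look like:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         // vaddr was not mapped to a physical page
 *     }
 */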

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big-endian machine. Don't do this at home, kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
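
/*
 * Usage sketch (illustrative only): legacy virtio code can compare the
 * target's byte order against the device's to decide whether guest
 * fields need swapping, roughly:
 *
 *     bool swap = target_words_bigendian() != device_is_big_endian;
 *
 * where device_is_big_endian is a hypothetical flag describing the
 * device's own byte order.
 */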

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
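
/*
 * Usage sketch (illustrative only): a caller can use this predicate to
 * skip guest physical pages backed by device MMIO rather than RAM or
 * ROM (paddr is a hypothetical guest physical address):
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         // points at a device, not memory; don't touch it directly
 *     }
 */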

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
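
/*
 * Usage sketch (illustrative only, assuming a RAMBlockIterFunc signature
 * matching the call above): a caller that wants to total the size of all
 * RAM blocks could pass a callback such as:
 *
 *     static int add_block_size(const char *block_name, void *host_addr,
 *                               ram_addr_t offset, ram_addr_t length,
 *                               void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;  // nonzero would stop the iteration early
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(add_block_size, &total);
 */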
#endif