/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
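
/*
 * For illustration (hypothetical values): with r1 = [0x1000, 0x3000) and
 * r2 = [0x2000, 0x4000), addrrange_intersects() is true because r1 contains
 * r2.start, and addrrange_intersection() yields [0x2000, 0x3000):
 *
 *     AddrRange r1 = addrrange_make(int128_make64(0x1000),
 *                                   int128_make64(0x2000));
 *     AddrRange r2 = addrrange_make(int128_make64(0x2000),
 *                                   int128_make64(0x2000));
 *     if (addrrange_intersects(r1, r2)) {
 *         AddrRange i = addrrange_intersection(r1, r2);
 *         // i.start == 0x2000, i.size == 0x1000
 *     }
 */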

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
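
/*
 * In other words, memory_region_ioeventfd_before() defines an ordering over
 * (address, size, match_data, data, notifier), and _equal() holds exactly
 * when neither entry sorts before the other; note that data only
 * participates in the comparison when match_data is set. For illustration
 * (hypothetical values), an ioeventfd at 0x100 sorts before one at 0x200
 * regardless of the other fields, and two entries at the same address are
 * ordered by size next.
 */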

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
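
/*
 * For illustration (hypothetical layout): if the view holds three sorted
 * ranges [0x0, 0x1000) and [0x1000, 0x2000), both backed by the same
 * MemoryRegion at contiguous offsets and with identical attributes, plus
 * [0x3000, 0x4000) from another region, the first two satisfy can_merge()
 * and collapse into a single [0x0, 0x2000) FlatRange, leaving the view
 * with two ranges instead of three.
 */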

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
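
/*
 * For illustration (hypothetical values): on a little-endian target, a
 * 4-byte read of 0x12345678 from a DEVICE_BIG_ENDIAN region is byte-swapped
 * here to 0x78563412 before being returned to the caller; single-byte
 * accesses (size == 1) are never swapped.
 */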

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
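
/*
 * For illustration (hypothetical values): an 8-byte access to a region whose
 * maximum access size is 4 is split into two 4-byte calls to access_fn with
 * access_mask == 0xffffffff. For a little-endian region, the chunk at
 * addr+0 is placed into bits [31:0] (shift 0) and the chunk at addr+4 into
 * bits [63:32] (shift 32); a big-endian region uses the opposite shifts, so
 * the lowest-addressed chunk lands in the high bits of *value.
 */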

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
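
/*
 * For illustration (hypothetical layout): a 64KB container holding a
 * higher-priority 4KB device region at offset 0x1000 and RAM covering the
 * whole 64KB renders as three FlatRanges: RAM [0x0, 0x1000), the device
 * [0x1000, 0x2000), and RAM again [0x2000, 0x10000) with
 * offset_in_region == 0x2000. The device obscures the slice of RAM beneath
 * it because it was rendered first, and RAM only fills the remaining gaps.
 */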

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety. Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety. If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
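
/*
 * For illustration (hypothetical setup): if two address spaces each have a
 * root that is nothing more than a full-size, zero-offset alias of the same
 * system memory region, both roots resolve to that shared region here, so
 * both address spaces end up keyed to a single FlatView in the flat_views
 * hash table instead of carrying two identical copies.
 */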

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
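
/*
 * For illustration (hypothetical change): if a 4KB range moves from 0x1000
 * to 0x2000 between two views, the first pass (adding == false) emits
 * region_del for the range at 0x1000, and the second pass (adding == true)
 * emits region_add for the range at 0x2000. An unchanged range produces
 * only region_nop, plus log_start or log_stop if its dirty-logging mask
 * changed.
 */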

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
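
/*
 * A minimal usage sketch (hypothetical caller): batching several layout
 * changes under one transaction so listeners see a single update when the
 * outermost commit runs. bar_mr and new_base are placeholders; the setters
 * shown are part of the public MemoryRegion API in include/exec/memory.h:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();
 */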

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
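
/*
 * For illustration (hypothetical name): "pci[0]" escapes to "pci\x5b0\x5d".
 * Each of '/', '[', '\\' and ']' becomes a four-byte "\xNN" sequence so the
 * result is safe to use as a QOM path component, while names without those
 * characters are returned as a plain copy.
 */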

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
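
/*
 * A minimal usage sketch (hypothetical device): initialising a pure
 * container region and placing an MMIO subregion inside it. dev, dev_ops,
 * the names and the sizes are placeholders; memory_region_init_io() and
 * memory_region_add_subregion() are the public API for I/O regions and for
 * attaching children:
 *
 *     MemoryRegion container, mmio;
 *
 *     memory_region_init(&container, OBJECT(dev), "dev-container", 0x10000);
 *     memory_region_init_io(&mmio, OBJECT(dev), &dev_ops, dev,
 *                           "dev-mmio", 0x1000);
 *     memory_region_add_subregion(&container, 0x0, &mmio);
 */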
1151
Eric Blaked7bce992016-01-29 06:48:55 -07001152static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1153 void *opaque, Error **errp)
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001154{
1155 MemoryRegion *mr = MEMORY_REGION(obj);
1156 uint64_t value = mr->addr;
1157
Eric Blake51e72bc2016-01-29 06:48:54 -07001158 visit_type_uint64(v, name, &value, errp);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001159}
1160
Eric Blaked7bce992016-01-29 06:48:55 -07001161static void memory_region_get_container(Object *obj, Visitor *v,
1162 const char *name, void *opaque,
1163 Error **errp)
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001164{
1165 MemoryRegion *mr = MEMORY_REGION(obj);
1166 gchar *path = (gchar *)"";
1167
1168 if (mr->container) {
1169 path = object_get_canonical_path(OBJECT(mr->container));
1170 }
Eric Blake51e72bc2016-01-29 06:48:54 -07001171 visit_type_str(v, name, &path, errp);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001172 if (mr->container) {
1173 g_free(path);
1174 }
1175}
1176
1177static Object *memory_region_resolve_container(Object *obj, void *opaque,
1178 const char *part)
1179{
1180 MemoryRegion *mr = MEMORY_REGION(obj);
1181
1182 return OBJECT(mr->container);
1183}
1184
Eric Blaked7bce992016-01-29 06:48:55 -07001185static void memory_region_get_priority(Object *obj, Visitor *v,
1186 const char *name, void *opaque,
1187 Error **errp)
Peter Crosthwaited33382d2014-06-05 23:17:01 -07001188{
1189 MemoryRegion *mr = MEMORY_REGION(obj);
1190 int32_t value = mr->priority;
1191
Eric Blake51e72bc2016-01-29 06:48:54 -07001192 visit_type_int32(v, name, &value, errp);
Peter Crosthwaited33382d2014-06-05 23:17:01 -07001193}
1194
Eric Blaked7bce992016-01-29 06:48:55 -07001195static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1196 void *opaque, Error **errp)
Peter Crosthwaite52aef7b2014-06-05 23:17:35 -07001197{
1198 MemoryRegion *mr = MEMORY_REGION(obj);
1199 uint64_t value = memory_region_size(mr);
1200
Eric Blake51e72bc2016-01-29 06:48:54 -07001201 visit_type_uint64(v, name, &value, errp);
Peter Crosthwaite52aef7b2014-06-05 23:17:35 -07001202}
1203
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001204static void memory_region_initfn(Object *obj)
1205{
1206 MemoryRegion *mr = MEMORY_REGION(obj);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001207 ObjectProperty *op;
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001208
1209 mr->ops = &unassigned_mem_ops;
1210 mr->enabled = true;
1211 mr->romd_mode = true;
Jan Kiszka196ea132015-06-18 18:47:20 +02001212 mr->global_locking = true;
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001213 mr->destructor = memory_region_destructor_none;
1214 QTAILQ_INIT(&mr->subregions);
1215 QTAILQ_INIT(&mr->coalesced);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001216
1217 op = object_property_add(OBJECT(mr), "container",
1218 "link<" TYPE_MEMORY_REGION ">",
1219 memory_region_get_container,
1220 NULL, /* memory_region_set_container */
1221 NULL, NULL, &error_abort);
1222 op->resolve = memory_region_resolve_container;
1223
1224 object_property_add(OBJECT(mr), "addr", "uint64",
1225 memory_region_get_addr,
1226 NULL, /* memory_region_set_addr */
1227 NULL, NULL, &error_abort);
Peter Crosthwaited33382d2014-06-05 23:17:01 -07001228 object_property_add(OBJECT(mr), "priority", "uint32",
1229 memory_region_get_priority,
1230 NULL, /* memory_region_set_priority */
1231 NULL, NULL, &error_abort);
Peter Crosthwaite52aef7b2014-06-05 23:17:35 -07001232 object_property_add(OBJECT(mr), "size", "uint64",
1233 memory_region_get_size,
1234 NULL, /* memory_region_set_size, */
1235 NULL, NULL, &error_abort);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001236}
1237
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001238static void iommu_memory_region_initfn(Object *obj)
1239{
1240 MemoryRegion *mr = MEMORY_REGION(obj);
1241
1242 mr->is_iommu = true;
1243}
1244
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001245static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1246 unsigned size)
1247{
1248#ifdef DEBUG_UNASSIGNED
1249 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1250#endif
Andreas Färber4917cf42013-05-27 05:17:50 +02001251 if (current_cpu != NULL) {
1252 cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
Andreas Färberc658b942013-05-27 06:49:53 +02001253 }
Jan Kiszka68a74392013-09-02 18:43:31 +02001254 return 0;
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001255}
1256
1257static void unassigned_mem_write(void *opaque, hwaddr addr,
1258 uint64_t val, unsigned size)
1259{
1260#ifdef DEBUG_UNASSIGNED
1261 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1262#endif
Andreas Färber4917cf42013-05-27 05:17:50 +02001263 if (current_cpu != NULL) {
1264 cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
Andreas Färberc658b942013-05-27 06:49:53 +02001265 }
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001266}
1267
Paolo Bonzinid1970632013-05-24 13:23:38 +02001268static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
Peter Maydell8372d382018-05-31 14:50:52 +01001269 unsigned size, bool is_write,
1270 MemTxAttrs attrs)
Paolo Bonzinid1970632013-05-24 13:23:38 +02001271{
1272 return false;
1273}
1274
1275const MemoryRegionOps unassigned_mem_ops = {
1276 .valid.accepts = unassigned_mem_accepts,
1277 .endianness = DEVICE_NATIVE_ENDIAN,
1278};
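/*
 * Note that unassigned_mem_ops has no .read/.write callbacks: every
 * access is vetoed by .valid.accepts above, and the dispatch functions
 * below then fall back to unassigned_mem_read/write while returning
 * MEMTX_DECODE_ERROR.
 */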
1279
Alex Williamson4a2e2422016-10-31 09:53:03 -06001280static uint64_t memory_region_ram_device_read(void *opaque,
1281 hwaddr addr, unsigned size)
1282{
1283 MemoryRegion *mr = opaque;
1284 uint64_t data = (uint64_t)~0;
1285
1286 switch (size) {
1287 case 1:
1288 data = *(uint8_t *)(mr->ram_block->host + addr);
1289 break;
1290 case 2:
1291 data = *(uint16_t *)(mr->ram_block->host + addr);
1292 break;
1293 case 4:
1294 data = *(uint32_t *)(mr->ram_block->host + addr);
1295 break;
1296 case 8:
1297 data = *(uint64_t *)(mr->ram_block->host + addr);
1298 break;
1299 }
1300
1301 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1302
1303 return data;
1304}
1305
1306static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1307 uint64_t data, unsigned size)
1308{
1309 MemoryRegion *mr = opaque;
1310
1311 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1312
1313 switch (size) {
1314 case 1:
1315 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1316 break;
1317 case 2:
1318 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1319 break;
1320 case 4:
1321 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1322 break;
1323 case 8:
1324 *(uint64_t *)(mr->ram_block->host + addr) = data;
1325 break;
1326 }
1327}
1328
1329static const MemoryRegionOps ram_device_mem_ops = {
1330 .read = memory_region_ram_device_read,
1331 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001332 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001333 .valid = {
1334 .min_access_size = 1,
1335 .max_access_size = 8,
1336 .unaligned = true,
1337 },
1338 .impl = {
1339 .min_access_size = 1,
1340 .max_access_size = 8,
1341 .unaligned = true,
1342 },
1343};
1344
Paolo Bonzinid2702032013-05-24 11:55:06 +02001345bool memory_region_access_valid(MemoryRegion *mr,
1346 hwaddr addr,
1347 unsigned size,
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001348 bool is_write,
1349 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001350{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001351 int access_size_min, access_size_max;
1352 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001353
Avi Kivity093bc2c2011-07-26 14:26:01 +03001354 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1355 return false;
1356 }
1357
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001358 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001359 return true;
1360 }
1361
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001362 access_size_min = mr->ops->valid.min_access_size;
1363 if (!mr->ops->valid.min_access_size) {
1364 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001365 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001366
1367 access_size_max = mr->ops->valid.max_access_size;
1368 if (!mr->ops->valid.max_access_size) {
1369 access_size_max = 4;
1370 }
1371
1372 access_size = MAX(MIN(size, access_size_max), access_size_min);
1373 for (i = 0; i < size; i += access_size) {
1374 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
Peter Maydell8372d382018-05-31 14:50:52 +01001375 is_write, attrs)) {
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001376 return false;
1377 }
1378 }
1379
Avi Kivity093bc2c2011-07-26 14:26:01 +03001380 return true;
1381}
1382
Peter Maydellcc05c432015-04-26 16:49:23 +01001383static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1384 hwaddr addr,
1385 uint64_t *pval,
1386 unsigned size,
1387 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001388{
Peter Maydellcc05c432015-04-26 16:49:23 +01001389 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001390
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001391 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001392 return access_with_adjusted_size(addr, pval, size,
1393 mr->ops->impl.min_access_size,
1394 mr->ops->impl.max_access_size,
1395 memory_region_read_accessor,
1396 mr, attrs);
1397 } else if (mr->ops->read_with_attrs) {
1398 return access_with_adjusted_size(addr, pval, size,
1399 mr->ops->impl.min_access_size,
1400 mr->ops->impl.max_access_size,
1401 memory_region_read_with_attrs_accessor,
1402 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001403 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001404 return access_with_adjusted_size(addr, pval, size, 1, 4,
1405 memory_region_oldmmio_read_accessor,
1406 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001407 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001408}
1409
Peter Maydell3b643492015-04-26 16:49:23 +01001410MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1411 hwaddr addr,
1412 uint64_t *pval,
1413 unsigned size,
1414 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001415{
Peter Maydellcc05c432015-04-26 16:49:23 +01001416 MemTxResult r;
1417
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001418 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001419 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001420 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001421 }
Avi Kivitya621f382012-01-02 13:12:08 +02001422
Peter Maydellcc05c432015-04-26 16:49:23 +01001423 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001424 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001425 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001426}
1427
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001428/* Return true if an eventfd was signalled */
1429static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1430 hwaddr addr,
1431 uint64_t data,
1432 unsigned size,
1433 MemTxAttrs attrs)
1434{
1435 MemoryRegionIoeventfd ioeventfd = {
1436 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1437 .data = data,
1438 };
1439 unsigned i;
1440
1441 for (i = 0; i < mr->ioeventfd_nb; i++) {
1442 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1443 ioeventfd.e = mr->ioeventfds[i].e;
1444
Tristan Burgess73bb7532018-05-28 23:04:45 -04001445 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001446 event_notifier_set(ioeventfd.e);
1447 return true;
1448 }
1449 }
1450
1451 return false;
1452}
1453
Peter Maydell3b643492015-04-26 16:49:23 +01001454MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1455 hwaddr addr,
1456 uint64_t data,
1457 unsigned size,
1458 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001459{
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001460 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001461 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001462 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001463 }
1464
Avi Kivitya621f382012-01-02 13:12:08 +02001465 adjust_endianness(mr, &data, size);
1466
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001467 if ((!kvm_eventfds_enabled()) &&
1468 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1469 return MEMTX_OK;
1470 }
1471
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001472 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001473 return access_with_adjusted_size(addr, &data, size,
1474 mr->ops->impl.min_access_size,
1475 mr->ops->impl.max_access_size,
1476 memory_region_write_accessor, mr,
1477 attrs);
1478 } else if (mr->ops->write_with_attrs) {
1479 return
1480 access_with_adjusted_size(addr, &data, size,
1481 mr->ops->impl.min_access_size,
1482 mr->ops->impl.max_access_size,
1483 memory_region_write_with_attrs_accessor,
1484 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001485 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001486 return access_with_adjusted_size(addr, &data, size, 1, 4,
1487 memory_region_oldmmio_write_accessor,
1488 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001489 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001490}
1491
Avi Kivity093bc2c2011-07-26 14:26:01 +03001492void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001493 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001494 const MemoryRegionOps *ops,
1495 void *opaque,
1496 const char *name,
1497 uint64_t size)
1498{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001499 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001500 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001501 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001502 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001503}
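/*
 * Example usage (illustrative; 's', 'iomem' and 'mydev_mmio_ops' are
 * hypothetical device-model names): a device typically registers a 4KiB
 * MMIO window with
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_mmio_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * after which guest accesses are routed through mydev_mmio_ops by
 * memory_region_dispatch_read/write() above.
 */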
1504
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001505void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1506 Object *owner,
1507 const char *name,
1508 uint64_t size,
1509 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001510{
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001511 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1512}
1513
1514void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1515 Object *owner,
1516 const char *name,
1517 uint64_t size,
1518 bool share,
1519 Error **errp)
1520{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001521 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001522 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001523 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001524 mr->destructor = memory_region_destructor_ram;
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001525 mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001526 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001527}
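/*
 * The _nomigrate variants do not register the RAM contents for
 * migration.  A sketch of the usual pairing (names hypothetical):
 *
 *     memory_region_init_ram_nomigrate(&s->ram, OBJECT(s), "mydev.ram",
 *                                      0x10000, &error_fatal);
 *     vmstate_register_ram(&s->ram, DEVICE(s));
 *
 * which is what the memory_region_init_ram() wrapper does for callers
 * that do want the contents migrated.
 */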
1528
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001529void memory_region_init_resizeable_ram(MemoryRegion *mr,
1530 Object *owner,
1531 const char *name,
1532 uint64_t size,
1533 uint64_t max_size,
1534 void (*resized)(const char*,
1535 uint64_t length,
1536 void *host),
1537 Error **errp)
1538{
1539 memory_region_init(mr, owner, name, size);
1540 mr->ram = true;
1541 mr->terminates = true;
1542 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001543 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1544 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001545 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001546}
1547
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001548#ifdef __linux__
1549void memory_region_init_ram_from_file(MemoryRegion *mr,
1550 struct Object *owner,
1551 const char *name,
1552 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001553 uint64_t align,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001554 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001555 const char *path,
1556 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001557{
1558 memory_region_init(mr, owner, name, size);
1559 mr->ram = true;
1560 mr->terminates = true;
1561 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001562 mr->align = align;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001563 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001564 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001565}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001566
1567void memory_region_init_ram_from_fd(MemoryRegion *mr,
1568 struct Object *owner,
1569 const char *name,
1570 uint64_t size,
1571 bool share,
1572 int fd,
1573 Error **errp)
1574{
1575 memory_region_init(mr, owner, name, size);
1576 mr->ram = true;
1577 mr->terminates = true;
1578 mr->destructor = memory_region_destructor_ram;
1579 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1580 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1581}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001582#endif
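/*
 * Illustrative call (arguments hypothetical): a file-backed memory
 * backend could create shared guest RAM with
 *
 *     memory_region_init_ram_from_file(mr, owner, "pc.ram", size,
 *                                      0, true, "/dev/shm/guest-ram",
 *                                      &err);
 *
 * Passing share=true maps the file MAP_SHARED, so other processes
 * (e.g. vhost-user backends) can observe guest RAM through the same
 * file.
 */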
1583
Avi Kivity093bc2c2011-07-26 14:26:01 +03001584void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001585 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001586 const char *name,
1587 uint64_t size,
1588 void *ptr)
1589{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001590 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001591 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001592 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001593 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001594 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001595
1596 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1597 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001598 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001599}
1600
Alex Williamson21e00fa2016-10-31 09:53:03 -06001601void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1602 Object *owner,
1603 const char *name,
1604 uint64_t size,
1605 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301606{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001607 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1608 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001609 mr->ops = &ram_device_mem_ops;
1610 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301611}
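/*
 * Used e.g. by vfio for mmap'ed device BARs: the ram_device flag makes
 * internal accesses go through ram_device_mem_ops above with properly
 * sized loads and stores instead of memcpy-style direct access, which
 * matters for device memory with read/write side effects.
 */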
1612
Avi Kivity093bc2c2011-07-26 14:26:01 +03001613void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001614 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001615 const char *name,
1616 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001617 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001618 uint64_t size)
1619{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001620 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001621 mr->alias = orig;
1622 mr->alias_offset = offset;
1623}
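/*
 * Example (names illustrative): aliases make existing memory visible at
 * additional guest-physical addresses without duplicating storage, e.g.
 * mirroring the first 1MiB of system RAM:
 *
 *     memory_region_init_alias(&mirror, owner, "ram-mirror", system_ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(system_memory, 0xe0000000, &mirror);
 *
 * Accesses to the alias are redirected into 'orig' starting at 'offset'.
 */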
1624
Peter Maydellb59821a2017-07-07 15:42:50 +01001625void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1626 struct Object *owner,
1627 const char *name,
1628 uint64_t size,
1629 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001630{
1631 memory_region_init(mr, owner, name, size);
1632 mr->ram = true;
1633 mr->readonly = true;
1634 mr->terminates = true;
1635 mr->destructor = memory_region_destructor_ram;
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001636 mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
Peter Maydella1777f72016-07-04 13:06:35 +01001637 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1638}
1639
Peter Maydellb59821a2017-07-07 15:42:50 +01001640void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1641 Object *owner,
1642 const MemoryRegionOps *ops,
1643 void *opaque,
1644 const char *name,
1645 uint64_t size,
1646 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001647{
Peter Maydell39e0b032016-07-04 13:06:35 +01001648 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001649 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001650 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001651 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001652 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001653 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001654 mr->destructor = memory_region_destructor_ram;
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001655 mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001656}
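/*
 * Sketch (hypothetical flash-device names): a ROM device reads directly
 * from its RAM while in romd_mode but sends writes to 'ops', the
 * classic pattern for parallel flash:
 *
 *     memory_region_init_rom_device_nomigrate(&fl->mem, OBJECT(fl),
 *                                             &flash_ops, fl, "flash",
 *                                             size, &err);
 *     vmstate_register_ram(&fl->mem, DEVICE(fl));
 *
 * memory_region_rom_device_set_romd() below toggles between the modes.
 */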
1657
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001658void memory_region_init_iommu(void *_iommu_mr,
1659 size_t instance_size,
1660 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001661 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001662 const char *name,
1663 uint64_t size)
1664{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001665 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001666 struct MemoryRegion *mr;
1667
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001668 object_initialize(_iommu_mr, instance_size, mrtypename);
1669 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001670 memory_region_do_init(mr, owner, name, size);
1671 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001672 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001673 QLIST_INIT(&iommu_mr->iommu_notify);
1674 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001675}
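/*
 * Sketch (TYPE_MY_IOMMU_MEMORY_REGION is hypothetical): callers pass a
 * QOM type derived from TYPE_IOMMU_MEMORY_REGION whose class implements
 * at least ->translate:
 *
 *     memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
 *                              TYPE_MY_IOMMU_MEMORY_REGION, OBJECT(s),
 *                              "my-iommu", UINT64_MAX);
 */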
1676
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001677static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001678{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001679 MemoryRegion *mr = MEMORY_REGION(obj);
1680
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001681 assert(!mr->container);
1682
    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either, because
     * it has no references), so we can blindly clear mr->enabled.
     * Calling memory_region_set_enabled() instead could trigger a
     * transaction and cause an infinite loop.
     */
1689 mr->enabled = false;
1690 memory_region_transaction_begin();
1691 while (!QTAILQ_EMPTY(&mr->subregions)) {
1692 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1693 memory_region_del_subregion(mr, subregion);
1694 }
1695 memory_region_transaction_commit();
1696
Avi Kivity545e92e2011-08-08 19:58:48 +03001697 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001698 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001699 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001700 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001701}
1702
Paolo Bonzini803c0812013-05-07 06:59:09 +02001703Object *memory_region_owner(MemoryRegion *mr)
1704{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001705 Object *obj = OBJECT(mr);
1706 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001707}
1708
Paolo Bonzini46637be2013-05-07 09:06:00 +02001709void memory_region_ref(MemoryRegion *mr)
1710{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001711 /* MMIO callbacks most likely will access data that belongs
1712 * to the owner, hence the need to ref/unref the owner whenever
1713 * the memory region is in use.
1714 *
1715 * The memory region is a child of its owner. As long as the
1716 * owner doesn't call unparent itself on the memory region,
1717 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001718 * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it would slow down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001720 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001721 if (mr && mr->owner) {
1722 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001723 }
1724}
1725
1726void memory_region_unref(MemoryRegion *mr)
1727{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001728 if (mr && mr->owner) {
1729 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001730 }
1731}
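/*
 * Typical pattern (illustrative): callers that keep using a region
 * outside an RCU critical section hold a reference, e.g.
 *
 *     MemoryRegionSection sec = memory_region_find(root_mr, addr, len);
 *     if (sec.mr) {
 *         ... use sec.mr ...
 *         memory_region_unref(sec.mr);
 *     }
 *
 * memory_region_find() (below) takes that reference on the caller's
 * behalf.
 */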
1732
Avi Kivity093bc2c2011-07-26 14:26:01 +03001733uint64_t memory_region_size(MemoryRegion *mr)
1734{
Avi Kivity08dafab2011-10-16 13:19:17 +02001735 if (int128_eq(mr->size, int128_2_64())) {
1736 return UINT64_MAX;
1737 }
1738 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001739}
1740
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001741const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001742{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001743 if (!mr->name) {
1744 ((MemoryRegion *)mr)->name =
1745 object_get_canonical_path_component(OBJECT(mr));
1746 }
Peter Maydell302fa282014-08-19 20:05:46 +01001747 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001748}
1749
Alex Williamson21e00fa2016-10-31 09:53:03 -06001750bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301751{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001752 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301753}
1754
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001755uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001756{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001757 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001758 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001759 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1760 }
1761 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001762}
1763
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001764bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1765{
1766 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1767}
1768
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001769static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001770{
1771 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1772 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001773 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001774
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001775 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001776 flags |= iommu_notifier->notifier_flags;
1777 }
1778
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001779 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1780 imrc->notify_flag_changed(iommu_mr,
1781 iommu_mr->iommu_notify_flags,
1782 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001783 }
1784
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001785 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001786}
1787
Peter Xucdb30812016-09-23 13:02:26 +08001788void memory_region_register_iommu_notifier(MemoryRegion *mr,
1789 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001790{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001791 IOMMUMemoryRegion *iommu_mr;
1792
Jason Wangefcd38c2016-12-30 18:09:17 +08001793 if (mr->alias) {
1794 memory_region_register_iommu_notifier(mr->alias, n);
1795 return;
1796 }
1797
Peter Xucdb30812016-09-23 13:02:26 +08001798 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001799 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001800 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001801 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001802 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1803 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001804}
1805
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001806uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001807{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001808 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1809
1810 if (imrc->get_min_page_size) {
1811 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001812 }
1813 return TARGET_PAGE_SIZE;
1814}
1815
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001816void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001817{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001818 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001819 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001820 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001821 IOMMUTLBEntry iotlb;
1822
Peter Xufaa362e2017-04-07 18:59:11 +08001823 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001824 if (imrc->replay) {
1825 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001826 return;
1827 }
1828
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001829 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001830
David Gibsona788f222015-09-30 12:13:55 +10001831 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001832 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001833 if (iotlb.perm != IOMMU_NONE) {
1834 n->notify(n, &iotlb);
1835 }
1836
        /* If (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here; this check catches such a wraparound.
         */
1839 if ((addr + granularity) < addr) {
1840 break;
1841 }
1842 }
1843}
1844
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001845void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001846{
1847 IOMMUNotifier *notifier;
1848
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001849 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1850 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001851 }
1852}
1853
Peter Xucdb30812016-09-23 13:02:26 +08001854void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1855 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001856{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001857 IOMMUMemoryRegion *iommu_mr;
1858
Jason Wangefcd38c2016-12-30 18:09:17 +08001859 if (mr->alias) {
1860 memory_region_unregister_iommu_notifier(mr->alias, n);
1861 return;
1862 }
Peter Xucdb30812016-09-23 13:02:26 +08001863 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001864 iommu_mr = IOMMU_MEMORY_REGION(mr);
1865 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001866}
1867
Peter Xubd2bfa42017-04-07 18:59:10 +08001868void memory_region_notify_one(IOMMUNotifier *notifier,
1869 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001870{
Peter Xucdb30812016-09-23 13:02:26 +08001871 IOMMUNotifierFlag request_flags;
1872
Peter Xubd2bfa42017-04-07 18:59:10 +08001873 /*
     * Skip the notification if the notified range does not overlap
     * with the registered range.
1876 */
Maxime Coquelinb021d1c2017-10-10 11:42:47 +02001877 if (notifier->start > entry->iova + entry->addr_mask ||
Peter Xubd2bfa42017-04-07 18:59:10 +08001878 notifier->end < entry->iova) {
1879 return;
1880 }
Peter Xucdb30812016-09-23 13:02:26 +08001881
Peter Xubd2bfa42017-04-07 18:59:10 +08001882 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001883 request_flags = IOMMU_NOTIFIER_MAP;
1884 } else {
1885 request_flags = IOMMU_NOTIFIER_UNMAP;
1886 }
1887
Peter Xubd2bfa42017-04-07 18:59:10 +08001888 if (notifier->notifier_flags & request_flags) {
1889 notifier->notify(notifier, entry);
1890 }
1891}
1892
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001893void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001894 IOMMUTLBEntry entry)
1895{
1896 IOMMUNotifier *iommu_notifier;
1897
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001898 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001899
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001900 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001901 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001902 }
David Gibson06866572013-05-14 19:13:56 +10001903}
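/*
 * Illustrative notification from an IOMMU model on unmap (values
 * hypothetical):
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(page_size - 1),
 *         .translated_addr = 0,
 *         .addr_mask = page_size - 1,
 *         .perm = IOMMU_NONE,
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 *
 * IOMMU_NONE makes memory_region_notify_one() above deliver the entry
 * to IOMMU_NOTIFIER_UNMAP listeners only.
 */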
1904
Alexey Kardashevskiyf1334de2018-02-06 11:08:24 -07001905int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1906 enum IOMMUMemoryRegionAttr attr,
1907 void *data)
1908{
1909 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1910
1911 if (!imrc->get_attr) {
1912 return -EINVAL;
1913 }
1914
1915 return imrc->get_attr(iommu_mr, attr, data);
1916}
1917
Avi Kivity093bc2c2011-07-26 14:26:01 +03001918void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1919{
Avi Kivity5a583342011-07-26 14:26:02 +03001920 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001921 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001922
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001923 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001924 old_logging = mr->vga_logging_count;
1925 mr->vga_logging_count += log ? 1 : -1;
1926 if (!!old_logging == !!mr->vga_logging_count) {
1927 return;
1928 }
1929
Jan Kiszka59023ef2012-08-23 13:02:30 +02001930 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001931 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001932 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001933 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001934}
1935
Avi Kivitya8170e52012-10-23 12:30:10 +02001936bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1937 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001938{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001939 assert(mr->ram_block);
1940 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1941 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001942}
1943
Avi Kivitya8170e52012-10-23 12:30:10 +02001944void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1945 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001946{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001947 assert(mr->ram_block);
1948 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1949 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001950 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001951}
1952
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01001953static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001954{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001955 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001956 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001957 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001958 FlatRange *fr;
1959
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001960 /* If the same address space has multiple log_sync listeners, we
1961 * visit that address space's FlatView multiple times. But because
     * log_sync listeners are rare, this is still cheaper than the
     * bookkeeping needed to visit each address space exactly once.
1964 */
1965 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1966 if (!listener->log_sync) {
1967 continue;
1968 }
1969 as = listener->address_space;
1970 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001971 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01001972 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10001973 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001974 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001975 }
Avi Kivity5a583342011-07-26 14:26:02 +03001976 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001977 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001978 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001979}
1980
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01001981DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1982 hwaddr addr,
1983 hwaddr size,
1984 unsigned client)
1985{
1986 assert(mr->ram_block);
1987 memory_region_sync_dirty_bitmap(mr);
1988 return cpu_physical_memory_snapshot_and_clear_dirty(
1989 memory_region_get_ram_addr(mr) + addr, size, client);
1990}
1991
1992bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1993 hwaddr addr, hwaddr size)
1994{
1995 assert(mr->ram_block);
1996 return cpu_physical_memory_snapshot_get_dirty(snap,
1997 memory_region_get_ram_addr(mr) + addr, size);
1998}
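/*
 * Usage sketch, e.g. for display updates ('redraw' is a hypothetical
 * helper):
 *
 *     DirtyBitmapSnapshot *snap =
 *         memory_region_snapshot_and_clear_dirty(mr, 0, size,
 *                                                DIRTY_MEMORY_VGA);
 *     for (ofs = 0; ofs < size; ofs += stride) {
 *         if (memory_region_snapshot_get_dirty(mr, snap, ofs, stride)) {
 *             redraw(ofs, stride);
 *         }
 *     }
 *     g_free(snap);
 */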
1999
Avi Kivity093bc2c2011-07-26 14:26:01 +03002000void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2001{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002002 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002003 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002004 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002005 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002006 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002007 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002008}
2009
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002010void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002011{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002012 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002013 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002014 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002015 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002016 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002017 }
2018}
2019
Avi Kivitya8170e52012-10-23 12:30:10 +02002020void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2021 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002022{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002023 assert(mr->ram_block);
2024 cpu_physical_memory_test_and_clear_dirty(
2025 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002026}
2027
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002028int memory_region_get_fd(MemoryRegion *mr)
2029{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002030 int fd;
2031
2032 rcu_read_lock();
2033 while (mr->alias) {
2034 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002035 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002036 fd = mr->ram_block->fd;
2037 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002038
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002039 return fd;
2040}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002041
Avi Kivity093bc2c2011-07-26 14:26:01 +03002042void *memory_region_get_ram_ptr(MemoryRegion *mr)
2043{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002044 void *ptr;
2045 uint64_t offset = 0;
2046
2047 rcu_read_lock();
2048 while (mr->alias) {
2049 offset += mr->alias_offset;
2050 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002051 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002052 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002053 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002054 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002055
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002056 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002057}
2058
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002059MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2060{
2061 RAMBlock *block;
2062
2063 block = qemu_ram_block_from_host(ptr, false, offset);
2064 if (!block) {
2065 return NULL;
2066 }
2067
2068 return block->mr;
2069}
2070
Fam Zheng7ebb2742016-03-01 14:18:20 +08002071ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2072{
2073 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2074}
2075
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002076void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2077{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002078 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002079
Gongleifa53a0e2016-05-10 10:04:59 +08002080 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002081}
2082
Avi Kivity0d673e32012-10-02 15:28:50 +02002083static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002084{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002085 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002086 FlatRange *fr;
2087 CoalescedMemoryRange *cmr;
2088 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02002089 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002090
Paolo Bonzini856d7242013-05-06 11:57:21 +02002091 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002092 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002093 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02002094 section = (MemoryRegionSection) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002095 .fv = view,
Avi Kivity95d29942012-10-02 18:21:54 +02002096 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002097 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02002098 };
2099
Paolo Bonzini9a546352016-09-22 16:23:06 +02002100 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002101 int128_get64(fr->addr.start),
2102 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002103 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2104 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02002105 int128_sub(fr->addr.start,
2106 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002107 if (!addrrange_intersects(tmp, fr->addr)) {
2108 continue;
2109 }
2110 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002111 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002112 int128_get64(tmp.start),
2113 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002114 }
2115 }
2116 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002117 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002118}
2119
Avi Kivity0d673e32012-10-02 15:28:50 +02002120static void memory_region_update_coalesced_range(MemoryRegion *mr)
2121{
2122 AddressSpace *as;
2123
2124 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2125 memory_region_update_coalesced_range_as(mr, as);
2126 }
2127}
2128
Avi Kivity093bc2c2011-07-26 14:26:01 +03002129void memory_region_set_coalescing(MemoryRegion *mr)
2130{
2131 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002132 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002133}
2134
2135void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002136 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002137 uint64_t size)
2138{
Anthony Liguori7267c092011-08-20 22:09:37 -05002139 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002140
Avi Kivity08dafab2011-10-16 13:19:17 +02002141 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002142 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2143 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002144 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002145}
2146
2147void memory_region_clear_coalescing(MemoryRegion *mr)
2148{
2149 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002150 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002151
Jan Kiszkad4105152012-08-23 13:02:29 +02002152 qemu_flush_coalesced_mmio_buffer();
2153 mr->flush_coalesced_mmio = false;
2154
Avi Kivity093bc2c2011-07-26 14:26:01 +03002155 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2156 cmr = QTAILQ_FIRST(&mr->coalesced);
2157 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002158 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002159 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002160 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002161
2162 if (updated) {
2163 memory_region_update_coalesced_range(mr);
2164 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002165}
2166
Jan Kiszkad4105152012-08-23 13:02:29 +02002167void memory_region_set_flush_coalesced(MemoryRegion *mr)
2168{
2169 mr->flush_coalesced_mmio = true;
2170}
2171
2172void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2173{
2174 qemu_flush_coalesced_mmio_buffer();
2175 if (QTAILQ_EMPTY(&mr->coalesced)) {
2176 mr->flush_coalesced_mmio = false;
2177 }
2178}
2179
Jan Kiszka196ea132015-06-18 18:47:20 +02002180void memory_region_clear_global_locking(MemoryRegion *mr)
2181{
2182 mr->global_locking = false;
2183}
2184
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002185static bool userspace_eventfd_warning;
2186
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002187void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002188 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002189 unsigned size,
2190 bool match_data,
2191 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002192 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002193{
2194 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002195 .addr.start = int128_make64(addr),
2196 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002197 .match_data = match_data,
2198 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002199 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002200 };
2201 unsigned i;
2202
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002203 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2204 userspace_eventfd_warning))) {
2205 userspace_eventfd_warning = true;
2206 error_report("Using eventfd without MMIO binding in KVM. "
2207 "Suboptimal performance expected");
2208 }
2209
Jason Wangb8aecea2015-11-06 16:02:45 +08002210 if (size) {
2211 adjust_endianness(mr, &mrfd.data, size);
2212 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002213 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002214 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002215 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002216 break;
2217 }
2218 }
2219 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002220 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002221 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2222 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2223 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2224 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002225 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002226 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002227}
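/*
 * Example (illustrative, virtio-style doorbell; names hypothetical):
 * make 2-byte writes of the value 'vq_idx' at offset 'notify_off'
 * signal an EventNotifier directly:
 *
 *     memory_region_add_eventfd(&proxy->notify_mr, notify_off, 2,
 *                               true, vq_idx, &vq->host_notifier);
 *
 * With kvm_eventfds_enabled() the match happens in the kernel;
 * otherwise memory_region_dispatch_write_eventfds() above handles it.
 */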
2228
2229void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002230 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002231 unsigned size,
2232 bool match_data,
2233 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002234 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002235{
2236 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002237 .addr.start = int128_make64(addr),
2238 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002239 .match_data = match_data,
2240 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002241 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002242 };
2243 unsigned i;
2244
Jason Wangb8aecea2015-11-06 16:02:45 +08002245 if (size) {
2246 adjust_endianness(mr, &mrfd.data, size);
2247 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002248 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002249 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002250 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002251 break;
2252 }
2253 }
2254 assert(i != mr->ioeventfd_nb);
2255 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2256 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2257 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002258 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002259 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002260 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002261 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002262}
2263
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002264static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002265{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002266 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002267 MemoryRegion *other;
2268
Jan Kiszka59023ef2012-08-23 13:02:30 +02002269 memory_region_transaction_begin();
2270
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002271 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002272 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002273 if (subregion->priority >= other->priority) {
2274 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2275 goto done;
2276 }
2277 }
2278 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2279done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002280 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002281 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002282}
2283
Peter Crosthwaite05987012014-06-05 23:14:44 -07002284static void memory_region_add_subregion_common(MemoryRegion *mr,
2285 hwaddr offset,
2286 MemoryRegion *subregion)
2287{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002288 assert(!subregion->container);
2289 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002290 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002291 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002292}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002293
2294void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002295 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002296 MemoryRegion *subregion)
2297{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002298 subregion->priority = 0;
2299 memory_region_add_subregion_common(mr, offset, subregion);
2300}
2301
2302void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002303 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002304 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002305 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002306{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002307 subregion->priority = priority;
2308 memory_region_add_subregion_common(mr, offset, subregion);
2309}
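/*
 * Illustrative: overlap is resolved by priority (higher wins) when the
 * flat view is rendered, e.g. shadowing RAM with ROM at the same guest
 * address ('sysmem', 'ram' and 'rom' are hypothetical):
 *
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     memory_region_add_subregion_overlap(sysmem, 0, rom, 1);
 */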
2310
2311void memory_region_del_subregion(MemoryRegion *mr,
2312 MemoryRegion *subregion)
2313{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002314 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002315 assert(subregion->container == mr);
2316 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002317 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002318 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002319 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002320 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002321}
2322
2323void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2324{
2325 if (enabled == mr->enabled) {
2326 return;
2327 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002328 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002329 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002330 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002331 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002332}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002333
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002334void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2335{
2336 Int128 s = int128_make64(size);
2337
2338 if (size == UINT64_MAX) {
2339 s = int128_2_64();
2340 }
2341 if (int128_eq(s, mr->size)) {
2342 return;
2343 }
2344 memory_region_transaction_begin();
2345 mr->size = s;
2346 memory_region_update_pending = true;
2347 memory_region_transaction_commit();
2348}
2349
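/*
 * Detach and re-attach mr to its container inside a single transaction
 * so that a changed mr->addr takes effect atomically; the temporary
 * reference keeps mr alive while memory_region_del_subregion() drops
 * its ref.
 */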
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002350static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002351{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002352 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002353
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002354 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002355 memory_region_transaction_begin();
2356 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002357 memory_region_del_subregion(container, mr);
2358 mr->container = container;
2359 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002360 memory_region_unref(mr);
2361 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002362 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002363}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002364
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002365void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2366{
2367 if (addr != mr->addr) {
2368 mr->addr = addr;
2369 memory_region_readd_subregion(mr);
2370 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002371}
2372
Avi Kivitya8170e52012-10-23 12:30:10 +02002373void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002374{
Avi Kivity47033592011-12-04 19:16:50 +02002375 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002376
Jan Kiszka59023ef2012-08-23 13:02:30 +02002377 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002378 return;
2379 }
2380
Jan Kiszka59023ef2012-08-23 13:02:30 +02002381 memory_region_transaction_begin();
2382 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002383 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002384 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002385}
2386
Igor Mammedova2b257d2014-10-31 16:38:37 +00002387uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2388{
2389 return mr->align;
2390}
2391
Avi Kivitye2177952011-12-08 15:00:18 +02002392static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2393{
2394 const AddrRange *addr = addr_;
2395 const FlatRange *fr = fr_;
2396
2397 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2398 return -1;
2399 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2400 return 1;
2401 }
2402 return 0;
2403}
2404
Paolo Bonzini99e86342013-05-06 10:26:13 +02002405static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002406{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002407 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002408 sizeof(FlatRange), cmp_flatrange_addr);
2409}
2410
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002411bool memory_region_is_mapped(MemoryRegion *mr)
2412{
    return mr->container != NULL;
2414}
2415
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002416/* Same as memory_region_find, but it does not add a reference to the
2417 * returned region. It must be called from an RCU critical section.
2418 */
2419static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2420 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002421{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002422 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002423 MemoryRegion *root;
2424 AddressSpace *as;
2425 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002426 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002427 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002428
Paolo Bonzini73034e92013-05-07 15:48:28 +02002429 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002430 for (root = mr; root->container; ) {
2431 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002432 addr += root->addr;
2433 }
2434
2435 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002436 if (!as) {
2437 return ret;
2438 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002439 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002440
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002441 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002442 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002443 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002444 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002445 }
2446
Paolo Bonzini99e86342013-05-06 10:26:13 +02002447 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002448 --fr;
2449 }
2450
2451 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002452 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002453 range = addrrange_intersection(range, fr->addr);
2454 ret.offset_within_region = fr->offset_in_region;
2455 ret.offset_within_region += int128_get64(int128_sub(range.start,
2456 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002457 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002458 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002459 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002460 return ret;
2461}
2462
2463MemoryRegionSection memory_region_find(MemoryRegion *mr,
2464 hwaddr addr, uint64_t size)
2465{
2466 MemoryRegionSection ret;
2467 rcu_read_lock();
2468 ret = memory_region_find_rcu(mr, addr, size);
2469 if (ret.mr) {
2470 memory_region_ref(ret.mr);
2471 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002472 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002473 return ret;
2474}
2475
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002476bool memory_region_present(MemoryRegion *container, hwaddr addr)
2477{
2478 MemoryRegion *mr;
2479
2480 rcu_read_lock();
2481 mr = memory_region_find_rcu(container, addr, 1).mr;
2482 rcu_read_unlock();
2483 return mr && mr != container;
2484}
2485
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002486void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002487{
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002488 memory_region_sync_dirty_bitmap(NULL);
Avi Kivity7664e802011-12-11 14:47:25 +02002489}
2490
Jay Zhou19310762017-07-28 18:28:53 +08002491static VMChangeStateEntry *vmstate_change;
2492
void memory_global_dirty_log_start(void)
{
    /* Cancel any deferred stop that is still pending.  */
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        /* The VM is resuming: perform the deferred dirty log stop now.  */
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        /* The VM is stopped: defer the actual stop until it resumes.  */
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}

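/*
 * Example (an added illustrative sketch, not part of the original file):
 * the typical global dirty log lifecycle as driven by a RAM-migration
 * style client; "track_dirty_pages" is a hypothetical caller.
 *
 *   static void track_dirty_pages(void)
 *   {
 *       memory_global_dirty_log_start();   // listeners see log_global_start
 *
 *       // ... let the guest run, then fold accelerator dirty bits into
 *       // the bitmaps ...
 *       memory_global_dirty_log_sync();
 *
 *       // Deferred via a VM change state handler if the VM is stopped.
 *       memory_global_dirty_log_stop();
 *   }
 */
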
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;

    /* Keep the global listener list sorted by ascending priority.  */
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    /* ... and likewise for the per-address-space list.  */
    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

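/*
 * Example (an added illustrative sketch, not part of the original file):
 * a minimal MemoryListener that reports every section mapped into an
 * address space.  "my_region_add", "my_listener" and the priority value
 * are assumptions; real clients (KVM, vhost, ...) embed the listener in
 * a larger state structure.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       printf("mapped %s at 0x%" PRIx64 "\n",
 *              memory_region_name(section->mr),
 *              (uint64_t)section->offset_within_address_space);
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   // Registering immediately replays the current flat view through
 *   // region_add, bracketed by begin/commit when those hooks are set:
 *   memory_listener_register(&my_listener, &address_space_memory);
 */
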
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback calls
     * memory_region_invalidate_mmio_ptr, which seems likely when we
     * use a cache.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

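/*
 * Example (an added illustrative sketch, not part of the original file):
 * a device offering a direct host pointer for part of its MMIO window
 * through the request_ptr hook, matching the call made above.
 * "MyDevState", "mydev_request_ptr" and the fixed 4 KiB window are
 * assumptions made for the example.
 *
 *   static void *mydev_request_ptr(void *opaque, hwaddr addr,
 *                                  unsigned *size, unsigned *offset)
 *   {
 *       MyDevState *s = opaque;
 *
 *       *size = 4096;             // length of the direct-mapped window
 *       *offset = 0;              // window offset within the region
 *       return s->shadow_buf;     // host memory backing the window
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .request_ptr = mydev_request_ptr,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *   };
 */
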
typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later.  */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find added a reference to section.mr.  */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it.  */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    /* Grab a free slot from the static pool, or fall back to the heap.  */
    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

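/*
 * Example (an added illustrative sketch, not part of the original file):
 * giving a DMA-capable device its own address space over a fresh
 * container region.  "MyDevState" and the names are assumptions; the
 * root region stays referenced until address_space_destroy() runs.
 *
 *   static void mydev_init_dma(MyDevState *s, Object *owner)
 *   {
 *       memory_region_init(&s->dma_root, owner, "mydev-dma-root",
 *                          UINT64_MAX);
 *       address_space_init(&s->dma_as, &s->dma_root, "mydev-dma");
 *       // ... map subregions into s->dma_root as needed ...
 *   }
 */
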
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->current_map (and the dispatch tables hanging
     * off its flat view) are dummy entries that the guest should never
     * use.  Wait for the old values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect regions that overflow the address space.  This
     * should never happen normally; if it does, print something so
     * the user observing the output is warned.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* Check if the alias is already in the queue.  */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all flat views in one table, keyed by view, so that
         * address spaces sharing a FlatView are listed together.
         */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print each distinct flat view once.  */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* Print aliased regions.  */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

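/*
 * Example (an added illustrative sketch, not part of the original file):
 * how the HMP "info mtree" command reaches this function.  The monitor
 * handler (the argument handling here is an assumption) forwards its
 * printf callback and the Monitor pointer as the opaque:
 *
 *   void hmp_info_mtree(Monitor *mon, const QDict *qdict)
 *   {
 *       bool flatview = qdict_get_try_bool(qdict, "flatview", false);
 *       bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree",
 *                                               false);
 *
 *       mtree_info((fprintf_function)monitor_printf, mon,
 *                  flatview, dispatch_tree);
 *   }
 */
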
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

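/*
 * Example (an added illustrative sketch, not part of the original file):
 * a device realize function allocating migratable RAM and mapping it.
 * "MyDevState", the region name and the base address are assumptions.
 *
 *   static void mydev_realize(DeviceState *dev, Error **errp)
 *   {
 *       MyDevState *s = MYDEV(dev);
 *       Error *err = NULL;
 *
 *       // The DeviceState owner gives the RAM block its unique,
 *       // migration-stable name.
 *       memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                              64 * 1024, &err);
 *       if (err) {
 *           error_propagate(errp, err);
 *           return;
 *       }
 *       memory_region_add_subregion(get_system_memory(),
 *                                   0x10000000, &s->ram);
 *   }
 */
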
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

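/*
 * Example (an added illustrative sketch, not part of the original file):
 * a ROM device reads like RAM while writes go to its ops, the way flash
 * devices trap programming commands.  The "myflash_*" names are
 * assumptions; reads are served from the RAM backing while the region
 * stays in ROMD mode.
 *
 *   static void myflash_write(void *opaque, hwaddr addr,
 *                             uint64_t value, unsigned size)
 *   {
 *       // Decode a flash command instead of storing the value.
 *   }
 *
 *   static const MemoryRegionOps myflash_ops = {
 *       .write = myflash_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *   };
 *
 *   memory_region_init_rom_device(&s->flash_mr, OBJECT(dev), &myflash_ops,
 *                                 s, "myflash.rom", s->size, errp);
 */
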
static const TypeInfo memory_region_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_MEMORY_REGION,
    .instance_size = sizeof(MemoryRegion),
    .instance_init = memory_region_initfn,
    .instance_finalize = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)