/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

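/*
 * Nesting depth of memory_region_transaction_begin(); flat view and
 * ioeventfd updates are deferred until this drops back to zero in
 * memory_region_transaction_commit().
 */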
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

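/*
 * Strict ordering for ioeventfds, used to keep as->ioeventfds sorted:
 * compare by address, then size, then match_data, then data, and
 * finally by notifier.
 */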
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e)))) {
        return true;
    }

    return false;
}

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

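/*
 * Take a reference only if the view is still alive (ref > 0). Readers
 * use this to guard against racing with a concurrent replacement of
 * as->current_map; see address_space_get_flatview().
 */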
static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

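/*
 * Byte-swap the value if the byte order requested in @op differs from
 * the byte order of the device backing @mr.
 */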
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

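/*
 * Accumulate a device read into *value: a non-negative @shift moves the
 * masked chunk left, a negative one moves it right. The write-side
 * helper below extracts the chunk with the inverse shift.
 */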
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

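/*
 * Split an access of @size bytes into chunks no smaller than
 * access_size_min and no larger than access_size_max, invoking
 * @access_fn once per chunk with the shift and mask that place the
 * chunk at the right position in *value, in an order that matches the
 * device's endianness.
 */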
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr, opaque)) {
            break;
        }
    }
}

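/*
 * Walk from @mr down through trivial aliases and sole enabled children
 * that cover the whole region, so that address spaces whose roots
 * resolve to the same "real" root can share one FlatView. Returns NULL
 * if nothing in the tree is enabled.
 */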
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety. Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety. If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

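/*
 * Rebuild as->ioeventfds from the current flat view, and notify the
 * listeners of the delta via address_space_add_del_ioeventfds().
 */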
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                            ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'. Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

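/*
 * One pass of the FlatView diff. With adding=false, emit region_del
 * (and coalesced-I/O removal) for ranges that are in the old view but
 * not the new one; with adding=true, emit region_add for new ranges and
 * region_nop/log_start/log_stop for ranges present in both.
 */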
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

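/*
 * Throw away the cached FlatViews and regenerate one FlatView per
 * distinct flat-view root among the registered address spaces.
 */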
static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

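/*
 * Transactions may nest. The flat views and ioeventfds are only
 * brought up to date when the outermost
 * memory_region_transaction_commit() runs, and only if an update is
 * actually pending.
 */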
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

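/*
 * Escape '/', '[', '\' and ']' in @name as "\xNN" so the result can be
 * used as a QOM child property name. Returns a plain copy when nothing
 * needs escaping.
 */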
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

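/*
 * Common initialization: record the size (UINT64_MAX is treated as
 * 2^64), name and owner, and attach the region to its owner (or to
 * /machine/unattached) as a QOM child named "name[*]".
 */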
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001275static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1276 unsigned size)
1277{
1278#ifdef DEBUG_UNASSIGNED
1279 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1280#endif
Jan Kiszka68a74392013-09-02 18:43:31 +02001281 return 0;
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001282}
1283
1284static void unassigned_mem_write(void *opaque, hwaddr addr,
1285 uint64_t val, unsigned size)
1286{
1287#ifdef DEBUG_UNASSIGNED
1288 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1289#endif
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001290}
1291
Paolo Bonzinid1970632013-05-24 13:23:38 +02001292static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
Peter Maydell8372d382018-05-31 14:50:52 +01001293 unsigned size, bool is_write,
1294 MemTxAttrs attrs)
Paolo Bonzinid1970632013-05-24 13:23:38 +02001295{
1296 return false;
1297}
1298
1299const MemoryRegionOps unassigned_mem_ops = {
1300 .valid.accepts = unassigned_mem_accepts,
1301 .endianness = DEVICE_NATIVE_ENDIAN,
1302};
1303
Alex Williamson4a2e2422016-10-31 09:53:03 -06001304static uint64_t memory_region_ram_device_read(void *opaque,
1305 hwaddr addr, unsigned size)
1306{
1307 MemoryRegion *mr = opaque;
1308 uint64_t data = (uint64_t)~0;
1309
1310 switch (size) {
1311 case 1:
1312 data = *(uint8_t *)(mr->ram_block->host + addr);
1313 break;
1314 case 2:
1315 data = *(uint16_t *)(mr->ram_block->host + addr);
1316 break;
1317 case 4:
1318 data = *(uint32_t *)(mr->ram_block->host + addr);
1319 break;
1320 case 8:
1321 data = *(uint64_t *)(mr->ram_block->host + addr);
1322 break;
1323 }
1324
1325 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1326
1327 return data;
1328}
1329
1330static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1331 uint64_t data, unsigned size)
1332{
1333 MemoryRegion *mr = opaque;
1334
1335 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1336
1337 switch (size) {
1338 case 1:
1339 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1340 break;
1341 case 2:
1342 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1343 break;
1344 case 4:
1345 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1346 break;
1347 case 8:
1348 *(uint64_t *)(mr->ram_block->host + addr) = data;
1349 break;
1350 }
1351}
1352
1353static const MemoryRegionOps ram_device_mem_ops = {
1354 .read = memory_region_ram_device_read,
1355 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001356 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001357 .valid = {
1358 .min_access_size = 1,
1359 .max_access_size = 8,
1360 .unaligned = true,
1361 },
1362 .impl = {
1363 .min_access_size = 1,
1364 .max_access_size = 8,
1365 .unaligned = true,
1366 },
1367};
1368
Paolo Bonzinid2702032013-05-24 11:55:06 +02001369bool memory_region_access_valid(MemoryRegion *mr,
1370 hwaddr addr,
1371 unsigned size,
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001372 bool is_write,
1373 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001374{
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001375 if (mr->ops->valid.accepts
1376 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
Philippe Mathieu-Daudé21786c72020-10-05 17:27:25 +02001377 qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
1378 "0x%" HWADDR_PRIX ", size %u, "
1379 "region '%s', reason: rejected\n",
1380 addr, size, memory_region_name(mr));
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001381 return false;
1382 }
Avi Kivity897fa7c2011-11-13 13:05:27 +02001383
Avi Kivity093bc2c2011-07-26 14:26:01 +03001384 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
Philippe Mathieu-Daudé21786c72020-10-05 17:27:25 +02001385 qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
1386 "0x%" HWADDR_PRIX ", size %u, "
1387 "region '%s', reason: unaligned\n",
1388 addr, size, memory_region_name(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001389 return false;
1390 }
1391
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001392 /* Treat a max_access_size of zero as "all sizes valid", for compatibility */
1393 if (!mr->ops->valid.max_access_size) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001394 return true;
1395 }
1396
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001397 if (size > mr->ops->valid.max_access_size
1398 || size < mr->ops->valid.min_access_size) {
Philippe Mathieu-Daudé21786c72020-10-05 17:27:25 +02001399 qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
1400 "0x%" HWADDR_PRIX ", size %u, "
1401 "region '%s', reason: invalid size "
1402 "(min:%u max:%u)\n",
1403 addr, size, memory_region_name(mr),
1404 mr->ops->valid.min_access_size,
1405 mr->ops->valid.max_access_size);
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001406 return false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001407 }
1408 return true;
1409}
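/*
 * The .valid constraints checked above come from the region's
 * MemoryRegionOps.  A sketch of how a device might restrict itself to
 * aligned 32-bit accesses (my_read/my_write are hypothetical):
 *
 *     static const MemoryRegionOps my_mmio_ops = {
 *         .read = my_read,
 *         .write = my_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *             .unaligned = false,
 *         },
 *     };
 */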
1410
Peter Maydellcc05c432015-04-26 16:49:23 +01001411static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1412 hwaddr addr,
1413 uint64_t *pval,
1414 unsigned size,
1415 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001416{
Peter Maydellcc05c432015-04-26 16:49:23 +01001417 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001418
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001419 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001420 return access_with_adjusted_size(addr, pval, size,
1421 mr->ops->impl.min_access_size,
1422 mr->ops->impl.max_access_size,
1423 memory_region_read_accessor,
1424 mr, attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001425 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001426 return access_with_adjusted_size(addr, pval, size,
1427 mr->ops->impl.min_access_size,
1428 mr->ops->impl.max_access_size,
1429 memory_region_read_with_attrs_accessor,
1430 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001431 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001432}
1433
Peter Maydell3b643492015-04-26 16:49:23 +01001434MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1435 hwaddr addr,
1436 uint64_t *pval,
Tony Nguyene67c9042019-08-24 04:36:48 +10001437 MemOp op,
Peter Maydell3b643492015-04-26 16:49:23 +01001438 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001439{
Tony Nguyene67c9042019-08-24 04:36:48 +10001440 unsigned size = memop_size(op);
Peter Maydellcc05c432015-04-26 16:49:23 +01001441 MemTxResult r;
1442
Alexander Bulekova3c20e92020-10-23 11:07:35 -04001443 fuzz_dma_read_cb(addr, size, mr, false);
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001444 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001445 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001446 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001447 }
Avi Kivitya621f382012-01-02 13:12:08 +02001448
Peter Maydellcc05c432015-04-26 16:49:23 +01001449 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Tony Nguyen9bf825b2019-08-24 04:36:54 +10001450 adjust_endianness(mr, pval, op);
Peter Maydellcc05c432015-04-26 16:49:23 +01001451 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001452}
1453
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001454/* Return true if an eventfd was signalled */
1455static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1456 hwaddr addr,
1457 uint64_t data,
1458 unsigned size,
1459 MemTxAttrs attrs)
1460{
1461 MemoryRegionIoeventfd ioeventfd = {
1462 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1463 .data = data,
1464 };
1465 unsigned i;
1466
1467 for (i = 0; i < mr->ioeventfd_nb; i++) {
1468 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1469 ioeventfd.e = mr->ioeventfds[i].e;
1470
Tristan Burgess73bb7532018-05-28 23:04:45 -04001471 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001472 event_notifier_set(ioeventfd.e);
1473 return true;
1474 }
1475 }
1476
1477 return false;
1478}
1479
Peter Maydell3b643492015-04-26 16:49:23 +01001480MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1481 hwaddr addr,
1482 uint64_t data,
Tony Nguyene67c9042019-08-24 04:36:48 +10001483 MemOp op,
Peter Maydell3b643492015-04-26 16:49:23 +01001484 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001485{
Tony Nguyene67c9042019-08-24 04:36:48 +10001486 unsigned size = memop_size(op);
1487
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001488 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001489 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001490 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001491 }
1492
Tony Nguyen9bf825b2019-08-24 04:36:54 +10001493 adjust_endianness(mr, &data, op);
Avi Kivitya621f382012-01-02 13:12:08 +02001494
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001495 if ((!kvm_eventfds_enabled()) &&
1496 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1497 return MEMTX_OK;
1498 }
1499
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001500 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001501 return access_with_adjusted_size(addr, &data, size,
1502 mr->ops->impl.min_access_size,
1503 mr->ops->impl.max_access_size,
1504 memory_region_write_accessor, mr,
1505 attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001506 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001507 return
1508 access_with_adjusted_size(addr, &data, size,
1509 mr->ops->impl.min_access_size,
1510 mr->ops->impl.max_access_size,
1511 memory_region_write_with_attrs_accessor,
1512 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001513 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001514}
1515
Avi Kivity093bc2c2011-07-26 14:26:01 +03001516void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001517 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001518 const MemoryRegionOps *ops,
1519 void *opaque,
1520 const char *name,
1521 uint64_t size)
1522{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001523 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001524 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001525 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001526 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001527}
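/*
 * Typical use, assuming a hypothetical MyDeviceState with an "iomem"
 * field and the my_mmio_ops sketched earlier; a sysbus device would
 * then export the region as an MMIO window:
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_mmio_ops, s,
 *                           "my-device-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
 */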
1528
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001529void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1530 Object *owner,
1531 const char *name,
1532 uint64_t size,
1533 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001534{
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001535 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1536}
1537
1538void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1539 Object *owner,
1540 const char *name,
1541 uint64_t size,
1542 bool share,
1543 Error **errp)
1544{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001545 Error *err = NULL;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001546 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001547 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001548 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001549 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001550 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001551 if (err) {
1552 mr->size = int128_zero();
1553 object_unparent(OBJECT(mr));
1554 error_propagate(errp, err);
1555 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001556}
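/*
 * Note that the _nomigrate initializers do not register the RAM block
 * for migration; callers that need that use memory_region_init_ram()
 * or pair this with vmstate_register_ram().  A sketch, with a
 * hypothetical owner device "dev":
 *
 *     memory_region_init_ram_nomigrate(&s->ram, OBJECT(dev), "my-ram",
 *                                      64 * KiB, &error_fatal);
 *     vmstate_register_ram(&s->ram, DEVICE(dev));
 */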
1557
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001558void memory_region_init_resizeable_ram(MemoryRegion *mr,
1559 Object *owner,
1560 const char *name,
1561 uint64_t size,
1562 uint64_t max_size,
1563 void (*resized)(const char*,
1564 uint64_t length,
1565 void *host),
1566 Error **errp)
1567{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001568 Error *err = NULL;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001569 memory_region_init(mr, owner, name, size);
1570 mr->ram = true;
1571 mr->terminates = true;
1572 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001573 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001574 mr, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001575 if (err) {
1576 mr->size = int128_zero();
1577 object_unparent(OBJECT(mr));
1578 error_propagate(errp, err);
1579 }
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001580}
1581
Hikaru Nishidad5dbde42018-09-24 21:32:05 +09001582#ifdef CONFIG_POSIX
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001583void memory_region_init_ram_from_file(MemoryRegion *mr,
1584 struct Object *owner,
1585 const char *name,
1586 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001587 uint64_t align,
Junyan Hecbfc0172018-07-18 15:47:58 +08001588 uint32_t ram_flags,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001589 const char *path,
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001590 bool readonly,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001591 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001592{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001593 Error *err = NULL;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001594 memory_region_init(mr, owner, name, size);
1595 mr->ram = true;
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001596 mr->readonly = readonly;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001597 mr->terminates = true;
1598 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001599 mr->align = align;
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001600 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
1601 readonly, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001602 if (err) {
1603 mr->size = int128_zero();
1604 object_unparent(OBJECT(mr));
1605 error_propagate(errp, err);
1606 }
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001607}
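/*
 * Sketch (POSIX hosts only; the path and sizes are hypothetical):
 * file-backed guest RAM, e.g. on hugetlbfs, shared with other
 * processes:
 *
 *     memory_region_init_ram_from_file(&s->ram, OBJECT(dev), "pc.ram",
 *                                      ram_size, 0, RAM_SHARED,
 *                                      "/dev/hugepages/guest", false,
 *                                      &error_fatal);
 */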
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001608
1609void memory_region_init_ram_from_fd(MemoryRegion *mr,
1610 struct Object *owner,
1611 const char *name,
1612 uint64_t size,
1613 bool share,
1614 int fd,
1615 Error **errp)
1616{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001617 Error *err = NULL;
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001618 memory_region_init(mr, owner, name, size);
1619 mr->ram = true;
1620 mr->terminates = true;
1621 mr->destructor = memory_region_destructor_ram;
Junyan Hecbfc0172018-07-18 15:47:58 +08001622 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1623 share ? RAM_SHARED : 0,
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001624 fd, false, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001625 if (err) {
1626 mr->size = int128_zero();
1627 object_unparent(OBJECT(mr));
1628 error_propagate(errp, err);
1629 }
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001630}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001631#endif
1632
Avi Kivity093bc2c2011-07-26 14:26:01 +03001633void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001634 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001635 const char *name,
1636 uint64_t size,
1637 void *ptr)
1638{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001639 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001640 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001641 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001642 mr->destructor = memory_region_destructor_ram;
Hu Taoef701d72014-09-09 13:27:54 +08001643
1644 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1645 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001646 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001647}
1648
Alex Williamson21e00fa2016-10-31 09:53:03 -06001649void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1650 Object *owner,
1651 const char *name,
1652 uint64_t size,
1653 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301654{
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001655 memory_region_init(mr, owner, name, size);
1656 mr->ram = true;
1657 mr->terminates = true;
Alex Williamson21e00fa2016-10-31 09:53:03 -06001658 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001659 mr->ops = &ram_device_mem_ops;
1660 mr->opaque = mr;
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001661 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini0a2949e2020-10-28 03:52:01 -04001662
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001663 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1664 assert(ptr != NULL);
1665 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301666}
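/*
 * Sketch of VFIO-style use: exposing a host-mmap()ed device BAR to
 * the guest through the ram_device_mem_ops accessors above (field
 * and variable names hypothetical):
 *
 *     memory_region_init_ram_device_ptr(&s->bar_mr, OBJECT(s),
 *                                       "vfio-bar0", bar_size, bar_ptr);
 */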
1667
Avi Kivity093bc2c2011-07-26 14:26:01 +03001668void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001669 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001670 const char *name,
1671 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001672 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001673 uint64_t size)
1674{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001675 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001676 mr->alias = orig;
1677 mr->alias_offset = offset;
1678}
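/*
 * Sketch: an alias re-exposing the first megabyte of a larger RAM
 * region at a second guest-physical address (names hypothetical):
 *
 *     memory_region_init_alias(&s->ram_low, OBJECT(s), "ram-low",
 *                              &s->ram, 0, 0x100000);
 *     memory_region_add_subregion(system_memory, 0x0, &s->ram_low);
 */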
1679
Peter Maydellb59821a2017-07-07 15:42:50 +01001680void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1681 struct Object *owner,
1682 const char *name,
1683 uint64_t size,
1684 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001685{
Philippe Mathieu-Daudé83696c82020-02-24 10:58:17 +01001686 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
Peter Maydella1777f72016-07-04 13:06:35 +01001687 mr->readonly = true;
Peter Maydella1777f72016-07-04 13:06:35 +01001688}
1689
Peter Maydellb59821a2017-07-07 15:42:50 +01001690void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1691 Object *owner,
1692 const MemoryRegionOps *ops,
1693 void *opaque,
1694 const char *name,
1695 uint64_t size,
1696 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001697{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001698 Error *err = NULL;
Peter Maydell39e0b032016-07-04 13:06:35 +01001699 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001700 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001701 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001702 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001703 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001704 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001705 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001706 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1707 if (err) {
1708 mr->size = int128_zero();
1709 object_unparent(OBJECT(mr));
1710 error_propagate(errp, err);
1711 }
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001712}
1713
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001714void memory_region_init_iommu(void *_iommu_mr,
1715 size_t instance_size,
1716 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001717 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001718 const char *name,
1719 uint64_t size)
1720{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001721 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001722 struct MemoryRegion *mr;
1723
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001724 object_initialize(_iommu_mr, instance_size, mrtypename);
1725 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001726 memory_region_do_init(mr, owner, name, size);
1727 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001728 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001729 QLIST_INIT(&iommu_mr->iommu_notify);
1730 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001731}
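/*
 * Sketch of an IOMMU subclass instance being initialized, assuming a
 * hypothetical TYPE_MY_IOMMU_MEMORY_REGION whose
 * IOMMUMemoryRegionClass implements at least the translate callback:
 *
 *     memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
 *                              TYPE_MY_IOMMU_MEMORY_REGION, OBJECT(s),
 *                              "my-iommu", UINT64_MAX);
 */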
1732
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001733static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001734{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001735 MemoryRegion *mr = MEMORY_REGION(obj);
1736
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001737 assert(!mr->container);
1738
1739 /* We know the region is not visible in any address space (it
1740 * does not have a container and cannot be a root either because
1741 * it has no references), so we can blindly clear mr->enabled.
1742 * memory_region_set_enabled instead could trigger a transaction
1743 * and cause an infinite loop.
1744 */
1745 mr->enabled = false;
1746 memory_region_transaction_begin();
1747 while (!QTAILQ_EMPTY(&mr->subregions)) {
1748 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1749 memory_region_del_subregion(mr, subregion);
1750 }
1751 memory_region_transaction_commit();
1752
Avi Kivity545e92e2011-08-08 19:58:48 +03001753 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001754 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001755 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001756 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001757}
1758
Paolo Bonzini803c0812013-05-07 06:59:09 +02001759Object *memory_region_owner(MemoryRegion *mr)
1760{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001761 Object *obj = OBJECT(mr);
1762 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001763}
1764
Paolo Bonzini46637be2013-05-07 09:06:00 +02001765void memory_region_ref(MemoryRegion *mr)
1766{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001767 /* MMIO callbacks most likely will access data that belongs
1768 * to the owner, hence the need to ref/unref the owner whenever
1769 * the memory region is in use.
1770 *
1771 * The memory region is a child of its owner. As long as the
1772 * owner doesn't call unparent itself on the memory region,
1773 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001774 * Memory regions without an owner are supposed to never go away;
1775 * we do not ref/unref them because it would slow down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001776 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001777 if (mr && mr->owner) {
1778 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001779 }
1780}
1781
1782void memory_region_unref(MemoryRegion *mr)
1783{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001784 if (mr && mr->owner) {
1785 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001786 }
1787}
1788
Avi Kivity093bc2c2011-07-26 14:26:01 +03001789uint64_t memory_region_size(MemoryRegion *mr)
1790{
Avi Kivity08dafab2011-10-16 13:19:17 +02001791 if (int128_eq(mr->size, int128_2_64())) {
1792 return UINT64_MAX;
1793 }
1794 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001795}
1796
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001797const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001798{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001799 if (!mr->name) {
1800 ((MemoryRegion *)mr)->name =
Markus Armbruster7a309cc2020-07-14 18:02:00 +02001801 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001802 }
Peter Maydell302fa282014-08-19 20:05:46 +01001803 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001804}
1805
Alex Williamson21e00fa2016-10-31 09:53:03 -06001806bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301807{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001808 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301809}
1810
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001811uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001812{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001813 uint8_t mask = mr->dirty_log_mask;
Zenghui Yu1370d612020-11-16 21:22:10 +08001814 RAMBlock *rb = mr->ram_block;
1815
1816 if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) ||
1817 memory_region_is_iommu(mr))) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001818 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1819 }
Paolo Bonzini0a2949e2020-10-28 03:52:01 -04001820
1821 if (tcg_enabled() && rb) {
1822 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
1823 mask |= (1 << DIRTY_MEMORY_CODE);
1824 }
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001825 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001826}
1827
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001828bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1829{
1830 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1831}
1832
Eric Auger549d40052019-09-24 10:25:17 +02001833static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
1834 Error **errp)
Peter Xu5bf3d312016-09-23 13:02:27 +08001835{
1836 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1837 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001838 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Eric Auger549d40052019-09-24 10:25:17 +02001839 int ret = 0;
Peter Xu5bf3d312016-09-23 13:02:27 +08001840
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001841 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001842 flags |= iommu_notifier->notifier_flags;
1843 }
1844
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001845 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
Eric Auger549d40052019-09-24 10:25:17 +02001846 ret = imrc->notify_flag_changed(iommu_mr,
1847 iommu_mr->iommu_notify_flags,
1848 flags, errp);
Peter Xu5bf3d312016-09-23 13:02:27 +08001849 }
1850
Eric Auger549d40052019-09-24 10:25:17 +02001851 if (!ret) {
1852 iommu_mr->iommu_notify_flags = flags;
1853 }
1854 return ret;
Peter Xu5bf3d312016-09-23 13:02:27 +08001855}
1856
Bharat Bhushan457f8cb2020-10-30 19:05:07 +01001857int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1858 uint64_t page_size_mask,
1859 Error **errp)
1860{
1861 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1862 int ret = 0;
1863
1864 if (imrc->iommu_set_page_size_mask) {
1865 ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
1866 }
1867 return ret;
1868}
1869
Eric Auger549d40052019-09-24 10:25:17 +02001870int memory_region_register_iommu_notifier(MemoryRegion *mr,
1871 IOMMUNotifier *n, Error **errp)
David Gibson06866572013-05-14 19:13:56 +10001872{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001873 IOMMUMemoryRegion *iommu_mr;
Eric Auger549d40052019-09-24 10:25:17 +02001874 int ret;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001875
Jason Wangefcd38c2016-12-30 18:09:17 +08001876 if (mr->alias) {
Eric Auger549d40052019-09-24 10:25:17 +02001877 return memory_region_register_iommu_notifier(mr->alias, n, errp);
Jason Wangefcd38c2016-12-30 18:09:17 +08001878 }
1879
Peter Xucdb30812016-09-23 13:02:26 +08001880 /* The notifier must register for at least one flag bit */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001881 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001882 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001883 assert(n->start <= n->end);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001884 assert(n->iommu_idx >= 0 &&
1885 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1886
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001887 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
Eric Auger549d40052019-09-24 10:25:17 +02001888 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
1889 if (ret) {
1890 QLIST_REMOVE(n, node);
1891 }
1892 return ret;
David Gibson06866572013-05-14 19:13:56 +10001893}
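/*
 * Sketch of a caller registering for unmap notifications over the
 * whole address range (my_unmap_cb is hypothetical):
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_unmap_cb, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(mr, &n, &err)) {
 *         error_report_err(err);
 *     }
 */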
1894
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001895uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001896{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001897 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1898
1899 if (imrc->get_min_page_size) {
1900 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001901 }
1902 return TARGET_PAGE_SIZE;
1903}
1904
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001905void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001906{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001907 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001908 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001909 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001910 IOMMUTLBEntry iotlb;
1911
Peter Xufaa362e2017-04-07 18:59:11 +08001912 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001913 if (imrc->replay) {
1914 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001915 return;
1916 }
1917
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001918 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001919
David Gibsona788f222015-09-30 12:13:55 +10001920 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Peter Maydell2c91bcf2018-06-15 14:57:16 +01001921 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
David Gibsona788f222015-09-30 12:13:55 +10001922 if (iotlb.perm != IOMMU_NONE) {
1923 n->notify(n, &iotlb);
1924 }
1925
1926 /* If (2^64 - MR size) < granularity, it's possible to get an
1927 * infinite loop here.  This check catches such a wraparound. */
1928 if ((addr + granularity) < addr) {
1929 break;
1930 }
1931 }
1932}
1933
Peter Xucdb30812016-09-23 13:02:26 +08001934void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1935 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001936{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001937 IOMMUMemoryRegion *iommu_mr;
1938
Jason Wangefcd38c2016-12-30 18:09:17 +08001939 if (mr->alias) {
1940 memory_region_unregister_iommu_notifier(mr->alias, n);
1941 return;
1942 }
Peter Xucdb30812016-09-23 13:02:26 +08001943 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001944 iommu_mr = IOMMU_MEMORY_REGION(mr);
Eric Auger549d40052019-09-24 10:25:17 +02001945 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
David Gibson06866572013-05-14 19:13:56 +10001946}
1947
Eugenio Pérez3b5ebf82020-11-16 17:55:02 +01001948void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001949 IOMMUTLBEvent *event)
David Gibson06866572013-05-14 19:13:56 +10001950{
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001951 IOMMUTLBEntry *entry = &event->entry;
Yan Zhao03c71402019-06-25 11:21:18 +08001952 hwaddr entry_end = entry->iova + entry->addr_mask;
Eugenio Pérez18048572020-11-16 17:55:06 +01001953 IOMMUTLBEntry tmp = *entry;
Peter Xucdb30812016-09-23 13:02:26 +08001954
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001955 if (event->type == IOMMU_NOTIFIER_UNMAP) {
1956 assert(entry->perm == IOMMU_NONE);
1957 }
1958
Peter Xubd2bfa42017-04-07 18:59:10 +08001959 /*
1960 * Skip the notification if it does not overlap
1961 * with the registered range.
1962 */
Yan Zhao03c71402019-06-25 11:21:18 +08001963 if (notifier->start > entry_end || notifier->end < entry->iova) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001964 return;
1965 }
Peter Xucdb30812016-09-23 13:02:26 +08001966
Eugenio Pérez18048572020-11-16 17:55:06 +01001967 if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1968 /* Crop (iova, addr_mask) to range */
1969 tmp.iova = MAX(tmp.iova, notifier->start);
1970 tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
1971 } else {
1972 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
1973 }
Yan Zhao03c71402019-06-25 11:21:18 +08001974
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001975 if (event->type & notifier->notifier_flags) {
Eugenio Pérez18048572020-11-16 17:55:06 +01001976 notifier->notify(notifier, &tmp);
Peter Xubd2bfa42017-04-07 18:59:10 +08001977 }
1978}
1979
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001980void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001981 int iommu_idx,
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001982 IOMMUTLBEvent event)
Peter Xubd2bfa42017-04-07 18:59:10 +08001983{
1984 IOMMUNotifier *iommu_notifier;
1985
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001986 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001987
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001988 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001989 if (iommu_notifier->iommu_idx == iommu_idx) {
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001990 memory_region_notify_iommu_one(iommu_notifier, &event);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001991 }
Peter Xucdb30812016-09-23 13:02:26 +08001992 }
David Gibson06866572013-05-14 19:13:56 +10001993}
1994
Alexey Kardashevskiyf1334de2018-02-06 11:08:24 -07001995int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1996 enum IOMMUMemoryRegionAttr attr,
1997 void *data)
1998{
1999 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2000
2001 if (!imrc->get_attr) {
2002 return -EINVAL;
2003 }
2004
2005 return imrc->get_attr(iommu_mr, attr, data);
2006}
2007
Peter Maydell21f40202018-06-15 14:57:15 +01002008int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2009 MemTxAttrs attrs)
2010{
2011 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2012
2013 if (!imrc->attrs_to_index) {
2014 return 0;
2015 }
2016
2017 return imrc->attrs_to_index(iommu_mr, attrs);
2018}
2019
2020int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2021{
2022 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2023
2024 if (!imrc->num_indexes) {
2025 return 1;
2026 }
2027
2028 return imrc->num_indexes(iommu_mr);
2029}
2030
Avi Kivity093bc2c2011-07-26 14:26:01 +03002031void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2032{
Avi Kivity5a583342011-07-26 14:26:02 +03002033 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002034 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03002035
Paolo Bonzinidbddac62015-03-23 10:31:53 +01002036 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002037 old_logging = mr->vga_logging_count;
2038 mr->vga_logging_count += log ? 1 : -1;
2039 if (!!old_logging == !!mr->vga_logging_count) {
2040 return;
2041 }
2042
Jan Kiszka59023ef2012-08-23 13:02:30 +02002043 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03002044 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01002045 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002046 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002047}
2048
Avi Kivitya8170e52012-10-23 12:30:10 +02002049void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2050 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002051{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002052 assert(mr->ram_block);
2053 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2054 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01002055 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002056}
2057
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002058static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002059{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002060 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02002061 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002062 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03002063 FlatRange *fr;
2064
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002065 /* If the same address space has multiple log_sync listeners, we
2066 * visit that address space's FlatView multiple times. But because
2067 * log_sync listeners are rare, this is still cheaper than walking
2068 * every address space once, including those without such listeners.
2069 */
2070 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2071 if (!listener->log_sync) {
2072 continue;
2073 }
2074 as = listener->address_space;
2075 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002076 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002077 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002078 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002079 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02002080 }
Avi Kivity5a583342011-07-26 14:26:02 +03002081 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002082 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03002083 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002084}
2085
Peter Xu077874e2019-06-03 14:50:51 +08002086void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2087 hwaddr len)
2088{
2089 MemoryRegionSection mrs;
2090 MemoryListener *listener;
2091 AddressSpace *as;
2092 FlatView *view;
2093 FlatRange *fr;
2094 hwaddr sec_start, sec_end, sec_size;
2095
2096 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2097 if (!listener->log_clear) {
2098 continue;
2099 }
2100 as = listener->address_space;
2101 view = address_space_get_flatview(as);
2102 FOR_EACH_FLAT_RANGE(fr, view) {
2103 if (!fr->dirty_log_mask || fr->mr != mr) {
2104 /*
2105 * The clear-dirty-bitmap operation only applies to
2106 * regions that have dirty logging enabled
2107 */
2108 continue;
2109 }
2110
2111 mrs = section_from_flat_range(fr, view);
2112
2113 sec_start = MAX(mrs.offset_within_region, start);
2114 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2115 sec_end = MIN(sec_end, start + len);
2116
2117 if (sec_start >= sec_end) {
2118 /*
2119 * If this memory region section has no intersection
2120 * with the requested range, skip.
2121 */
2122 continue;
2123 }
2124
2125 /* Valid case; shrink the section if needed */
2126 mrs.offset_within_address_space +=
2127 sec_start - mrs.offset_within_region;
2128 mrs.offset_within_region = sec_start;
2129 sec_size = sec_end - sec_start;
2130 mrs.size = int128_make64(sec_size);
2131 listener->log_clear(listener, &mrs);
2132 }
2133 flatview_unref(view);
2134 }
2135}
2136
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002137DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2138 hwaddr addr,
2139 hwaddr size,
2140 unsigned client)
2141{
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002142 DirtyBitmapSnapshot *snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002143 assert(mr->ram_block);
2144 memory_region_sync_dirty_bitmap(mr);
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002145 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2146 memory_global_after_dirty_log_sync();
2147 return snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002148}
2149
2150bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2151 hwaddr addr, hwaddr size)
2152{
2153 assert(mr->ram_block);
2154 return cpu_physical_memory_snapshot_get_dirty(snap,
2155 memory_region_get_ram_addr(mr) + addr, size);
2156}
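/*
 * Sketch of the snapshot API as a display device might use it to
 * repaint only dirty scanlines (variables hypothetical):
 *
 *     DirtyBitmapSnapshot *snap =
 *         memory_region_snapshot_and_clear_dirty(mr, 0, size,
 *                                                DIRTY_MEMORY_VGA);
 *     if (memory_region_snapshot_get_dirty(mr, snap, ofs, len)) {
 *         ... redraw that range ...
 *     }
 *     g_free(snap);
 */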
2157
Avi Kivity093bc2c2011-07-26 14:26:01 +03002158void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2159{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002160 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002161 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002162 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002163 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002164 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002165 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002166}
2167
Marc-André Lureauc26763f2018-10-03 15:44:52 +04002168void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2169{
2170 if (mr->nonvolatile != nonvolatile) {
2171 memory_region_transaction_begin();
2172 mr->nonvolatile = nonvolatile;
2173 memory_region_update_pending |= mr->enabled;
2174 memory_region_transaction_commit();
2175 }
2176}
2177
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002178void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002179{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002180 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002181 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002182 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002183 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002184 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002185 }
2186}
2187
Avi Kivitya8170e52012-10-23 12:30:10 +02002188void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2189 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002190{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002191 assert(mr->ram_block);
2192 cpu_physical_memory_test_and_clear_dirty(
2193 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002194}
2195
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002196int memory_region_get_fd(MemoryRegion *mr)
2197{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002198 int fd;
2199
Dr. David Alan Gilbert694ea272019-10-07 15:36:41 +01002200 RCU_READ_LOCK_GUARD();
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002201 while (mr->alias) {
2202 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002203 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002204 fd = mr->ram_block->fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002205
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002206 return fd;
2207}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002208
Avi Kivity093bc2c2011-07-26 14:26:01 +03002209void *memory_region_get_ram_ptr(MemoryRegion *mr)
2210{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002211 void *ptr;
2212 uint64_t offset = 0;
2213
Dr. David Alan Gilbert694ea272019-10-07 15:36:41 +01002214 RCU_READ_LOCK_GUARD();
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002215 while (mr->alias) {
2216 offset += mr->alias_offset;
2217 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002218 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002219 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002220 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002221
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002222 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002223}
2224
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002225MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2226{
2227 RAMBlock *block;
2228
2229 block = qemu_ram_block_from_host(ptr, false, offset);
2230 if (!block) {
2231 return NULL;
2232 }
2233
2234 return block->mr;
2235}
2236
Fam Zheng7ebb2742016-03-01 14:18:20 +08002237ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2238{
2239 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2240}
2241
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002242void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2243{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002244 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002245
Gongleifa53a0e2016-05-10 10:04:59 +08002246 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002247}
2248
Philippe Mathieu-Daudé9ecc9962020-05-08 08:24:54 +02002249void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2250{
2251 if (mr->ram_block) {
Philippe Mathieu-Daudéab7e41e2020-05-08 08:24:56 +02002252 qemu_ram_msync(mr->ram_block, addr, size);
Philippe Mathieu-Daudé9ecc9962020-05-08 08:24:54 +02002253 }
2254}
Beata Michalska61c490e2019-11-21 00:08:41 +00002255
Philippe Mathieu-Daudé4dfe59d2020-05-08 08:24:53 +02002256void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
Beata Michalska61c490e2019-11-21 00:08:41 +00002257{
2258 /*
2259 * This might need to be extended to cover
2260 * other types of memory regions.
2261 */
Philippe Mathieu-Daudé9ecc9962020-05-08 08:24:54 +02002262 if (mr->dirty_log_mask) {
2263 memory_region_msync(mr, addr, size);
Beata Michalska61c490e2019-11-21 00:08:41 +00002264 }
2265}
2266
Peter Xub960fc12019-08-20 22:13:28 +08002267/*
2268 * Notify the proper memory listeners about the change to the
2269 * newly added/removed CoalescedMemoryRange.
2270 */
2271static void memory_region_update_coalesced_range(MemoryRegion *mr,
2272 CoalescedMemoryRange *cmr,
2273 bool add)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002274{
Peter Xub960fc12019-08-20 22:13:28 +08002275 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002276 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002277 FlatRange *fr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002278
Avi Kivity0d673e32012-10-02 15:28:50 +02002279 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Peter Xub960fc12019-08-20 22:13:28 +08002280 view = address_space_get_flatview(as);
2281 FOR_EACH_FLAT_RANGE(fr, view) {
2282 if (fr->mr == mr) {
2283 flat_range_coalesced_io_notify(fr, as, cmr, add);
2284 }
2285 }
2286 flatview_unref(view);
Avi Kivity0d673e32012-10-02 15:28:50 +02002287 }
2288}
2289
Avi Kivity093bc2c2011-07-26 14:26:01 +03002290void memory_region_set_coalescing(MemoryRegion *mr)
2291{
2292 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002293 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002294}
2295
2296void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002297 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002298 uint64_t size)
2299{
Anthony Liguori7267c092011-08-20 22:09:37 -05002300 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002301
Avi Kivity08dafab2011-10-16 13:19:17 +02002302 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002303 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002304 memory_region_update_coalesced_range(mr, cmr, true);
Jan Kiszkad4105152012-08-23 13:02:29 +02002305 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002306}
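/*
 * Sketch: a device can mark its whole MMIO window as coalesced so
 * that KVM batches guest writes, or coalesce only a sub-range whose
 * writes may be delivered lazily (field and offsets hypothetical):
 *
 *     memory_region_set_coalescing(&s->iomem);
 *     memory_region_add_coalescing(&s->iomem, 0x100, 0x10);
 */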
2307
2308void memory_region_clear_coalescing(MemoryRegion *mr)
2309{
2310 CoalescedMemoryRange *cmr;
Peter Xu9c1aa1c2019-08-20 22:13:27 +08002311
2312 if (QTAILQ_EMPTY(&mr->coalesced)) {
2313 return;
2314 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002315
Jan Kiszkad4105152012-08-23 13:02:29 +02002316 qemu_flush_coalesced_mmio_buffer();
2317 mr->flush_coalesced_mmio = false;
2318
Avi Kivity093bc2c2011-07-26 14:26:01 +03002319 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2320 cmr = QTAILQ_FIRST(&mr->coalesced);
2321 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002322 memory_region_update_coalesced_range(mr, cmr, false);
Anthony Liguori7267c092011-08-20 22:09:37 -05002323 g_free(cmr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002324 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002325}
2326
Jan Kiszkad4105152012-08-23 13:02:29 +02002327void memory_region_set_flush_coalesced(MemoryRegion *mr)
2328{
2329 mr->flush_coalesced_mmio = true;
2330}
2331
2332void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2333{
2334 qemu_flush_coalesced_mmio_buffer();
2335 if (QTAILQ_EMPTY(&mr->coalesced)) {
2336 mr->flush_coalesced_mmio = false;
2337 }
2338}
2339
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002340static bool userspace_eventfd_warning;
2341
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002342void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002343 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002344 unsigned size,
2345 bool match_data,
2346 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002347 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002348{
2349 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002350 .addr.start = int128_make64(addr),
2351 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002352 .match_data = match_data,
2353 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002354 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002355 };
2356 unsigned i;
2357
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002358 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2359 userspace_eventfd_warning))) {
2360 userspace_eventfd_warning = true;
2361 error_report("Using eventfd without MMIO binding in KVM. "
2362 "Suboptimal performance expected");
2363 }
2364
Jason Wangb8aecea2015-11-06 16:02:45 +08002365 if (size) {
Tony Nguyen9bf825b2019-08-24 04:36:54 +10002366 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
Jason Wangb8aecea2015-11-06 16:02:45 +08002367 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002368 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002369 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002370 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002371 break;
2372 }
2373 }
2374 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002375 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002376 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2377 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2378 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2379 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002380 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002381 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002382}
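/*
 * Sketch of ioeventfd use for a virtio-style doorbell: guest writes
 * of the value 1 to offset 0x40 signal an EventNotifier, without an
 * exit to userspace when KVM can bind the eventfd (names and offsets
 * hypothetical):
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x40, 2, true, 1,
 *                               &s->doorbell);
 */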
2383
2384void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002385 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002386 unsigned size,
2387 bool match_data,
2388 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002389 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002390{
2391 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002392 .addr.start = int128_make64(addr),
2393 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002394 .match_data = match_data,
2395 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002396 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002397 };
2398 unsigned i;
2399
Jason Wangb8aecea2015-11-06 16:02:45 +08002400 if (size) {
Tony Nguyen9bf825b2019-08-24 04:36:54 +10002401 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
Jason Wangb8aecea2015-11-06 16:02:45 +08002402 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002403 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002404 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002405 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002406 break;
2407 }
2408 }
2409 assert(i != mr->ioeventfd_nb);
2410 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2411 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2412 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002413 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002414 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002415 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002416 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002417}
2418
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002419static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002420{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002421 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002422 MemoryRegion *other;
2423
Jan Kiszka59023ef2012-08-23 13:02:30 +02002424 memory_region_transaction_begin();
2425
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002426 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002427 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002428 if (subregion->priority >= other->priority) {
2429 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2430 goto done;
2431 }
2432 }
2433 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2434done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002435 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002436 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002437}
2438
Peter Crosthwaite05987012014-06-05 23:14:44 -07002439static void memory_region_add_subregion_common(MemoryRegion *mr,
2440 hwaddr offset,
2441 MemoryRegion *subregion)
2442{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002443 assert(!subregion->container);
2444 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002445 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002446 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002447}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002448
2449void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002450 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002451 MemoryRegion *subregion)
2452{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002453 subregion->priority = 0;
2454 memory_region_add_subregion_common(mr, offset, subregion);
2455}
2456
2457void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002458 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002459 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002460 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002461{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002462 subregion->priority = priority;
2463 memory_region_add_subregion_common(mr, offset, subregion);
2464}
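/*
 * Sketch: overlapping subregions are resolved by priority, so a ROM
 * at higher priority shadows the RAM beneath it (names hypothetical):
 *
 *     memory_region_add_subregion_overlap(sysmem, 0x00000, &s->ram, 0);
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000, &s->rom, 1);
 */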
2465
2466void memory_region_del_subregion(MemoryRegion *mr,
2467 MemoryRegion *subregion)
2468{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002469 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002470 assert(subregion->container == mr);
2471 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002472 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002473 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002474 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002475 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002476}
2477
2478void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2479{
2480 if (enabled == mr->enabled) {
2481 return;
2482 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002483 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002484 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002485 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002486 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002487}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002488
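/* Resize @mr; a @size of UINT64_MAX is treated as 2^64 (the whole 64-bit address space). */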
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002489void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2490{
2491 Int128 s = int128_make64(size);
2492
2493 if (size == UINT64_MAX) {
2494 s = int128_2_64();
2495 }
2496 if (int128_eq(s, mr->size)) {
2497 return;
2498 }
2499 memory_region_transaction_begin();
2500 mr->size = s;
2501 memory_region_update_pending = true;
2502 memory_region_transaction_commit();
2503}
2504
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002505static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002506{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002507 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002508
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002509 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002510 memory_region_transaction_begin();
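        /*
         * memory_region_del_subregion() drops a reference and clears
         * mr->container; the local reference and the restored container
         * keep mr alive and attached for the re-insertion below.
         */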
2511 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002512 memory_region_del_subregion(container, mr);
2513 mr->container = container;
2514 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002515 memory_region_unref(mr);
2516 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002517 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002518}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002519
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002520void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2521{
2522 if (addr != mr->addr) {
2523 mr->addr = addr;
2524 memory_region_readd_subregion(mr);
2525 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002526}
2527
Avi Kivitya8170e52012-10-23 12:30:10 +02002528void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002529{
Avi Kivity47033592011-12-04 19:16:50 +02002530 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002531
Jan Kiszka59023ef2012-08-23 13:02:30 +02002532 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002533 return;
2534 }
2535
Jan Kiszka59023ef2012-08-23 13:02:30 +02002536 memory_region_transaction_begin();
2537 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002538 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002539 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002540}
2541
Igor Mammedova2b257d2014-10-31 16:38:37 +00002542uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2543{
2544 return mr->align;
2545}
2546
Avi Kivitye2177952011-12-08 15:00:18 +02002547static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2548{
2549 const AddrRange *addr = addr_;
2550 const FlatRange *fr = fr_;
2551
2552 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2553 return -1;
2554 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2555 return 1;
2556 }
2557 return 0;
2558}
2559
Paolo Bonzini99e86342013-05-06 10:26:13 +02002560static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002561{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002562 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002563 sizeof(FlatRange), cmp_flatrange_addr);
2564}
2565
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002566bool memory_region_is_mapped(MemoryRegion *mr)
2567{
2568 return mr->container ? true : false;
2569}
2570
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002571/* Same as memory_region_find, but it does not add a reference to the
2572 * returned region. It must be called from an RCU critical section.
2573 */
2574static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2575 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002576{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002577 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002578 MemoryRegion *root;
2579 AddressSpace *as;
2580 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002581 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002582 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002583
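    /*
     * Translate addr from mr's coordinate space to an absolute address by
     * walking up the container chain to the address-space root.
     */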
Paolo Bonzini73034e92013-05-07 15:48:28 +02002584 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002585 for (root = mr; root->container; ) {
2586 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002587 addr += root->addr;
2588 }
2589
2590 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002591 if (!as) {
2592 return ret;
2593 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002594 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002595
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002596 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002597 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002598 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002599 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002600 }
2601
Paolo Bonzini99e86342013-05-06 10:26:13 +02002602 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002603 --fr;
2604 }
2605
2606 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002607 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002608 range = addrrange_intersection(range, fr->addr);
2609 ret.offset_within_region = fr->offset_in_region;
2610 ret.offset_within_region += int128_get64(int128_sub(range.start,
2611 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002612 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002613 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002614 ret.readonly = fr->readonly;
Marc-André Lureauc26763f2018-10-03 15:44:52 +04002615 ret.nonvolatile = fr->nonvolatile;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002616 return ret;
2617}
2618
2619MemoryRegionSection memory_region_find(MemoryRegion *mr,
2620 hwaddr addr, uint64_t size)
2621{
2622 MemoryRegionSection ret;
Dr. David Alan Gilbert694ea272019-10-07 15:36:41 +01002623 RCU_READ_LOCK_GUARD();
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002624 ret = memory_region_find_rcu(mr, addr, size);
2625 if (ret.mr) {
2626 memory_region_ref(ret.mr);
2627 }
Avi Kivitye2177952011-12-08 15:00:18 +02002628 return ret;
2629}
2630
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002631bool memory_region_present(MemoryRegion *container, hwaddr addr)
2632{
2633 MemoryRegion *mr;
2634
Dr. David Alan Gilbert694ea272019-10-07 15:36:41 +01002635 RCU_READ_LOCK_GUARD();
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002636 mr = memory_region_find_rcu(container, addr, 1).mr;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002637 return mr && mr != container;
2638}
2639
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002640void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002641{
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002642 memory_region_sync_dirty_bitmap(NULL);
Avi Kivity7664e802011-12-11 14:47:25 +02002643}
2644
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002645void memory_global_after_dirty_log_sync(void)
2646{
2647 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2648}
2649
Jay Zhou19310762017-07-28 18:28:53 +08002650static VMChangeStateEntry *vmstate_change;
2651
Avi Kivity7664e802011-12-11 14:47:25 +02002652void memory_global_dirty_log_start(void)
2653{
Jay Zhou19310762017-07-28 18:28:53 +08002654 if (vmstate_change) {
2655 qemu_del_vm_change_state_handler(vmstate_change);
2656 vmstate_change = NULL;
2657 }
2658
Avi Kivity7664e802011-12-11 14:47:25 +02002659 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002660
Avi Kivity7376e582012-02-08 21:05:17 +02002661 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002662
Wei Yang39adb532019-04-26 10:09:27 +08002663 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002664 memory_region_transaction_begin();
2665 memory_region_update_pending = true;
2666 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002667}
2668
Jay Zhou19310762017-07-28 18:28:53 +08002669static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002670{
Avi Kivity7664e802011-12-11 14:47:25 +02002671 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002672
Wei Yang39adb532019-04-26 10:09:27 +08002673 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002674 memory_region_transaction_begin();
2675 memory_region_update_pending = true;
2676 memory_region_transaction_commit();
2677
Avi Kivity7376e582012-02-08 21:05:17 +02002678 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002679}
2680
Jay Zhou19310762017-07-28 18:28:53 +08002681static void memory_vm_change_state_handler(void *opaque, int running,
2682 RunState state)
2683{
2684 if (running) {
2685 memory_global_dirty_log_do_stop();
2686
2687 if (vmstate_change) {
2688 qemu_del_vm_change_state_handler(vmstate_change);
2689 vmstate_change = NULL;
2690 }
2691 }
2692}
2693
2694void memory_global_dirty_log_stop(void)
2695{
2696 if (!runstate_is_running()) {
2697 if (vmstate_change) {
2698 return;
2699 }
2700 vmstate_change = qemu_add_vm_change_state_handler(
2701 memory_vm_change_state_handler, NULL);
2702 return;
2703 }
2704
2705 memory_global_dirty_log_do_stop();
2706}
2707
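/*
 * Replay the address space's current topology to a newly registered
 * listener: one region_add (plus log_start where dirty logging is active)
 * callback per FlatRange, bracketed by begin/commit.
 */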
Avi Kivity7664e802011-12-11 14:47:25 +02002708static void listener_add_address_space(MemoryListener *listener,
2709 AddressSpace *as)
2710{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002711 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002712 FlatRange *fr;
2713
Paolo Bonzini680a4782015-11-02 09:23:52 +01002714 if (listener->begin) {
2715 listener->begin(listener);
2716 }
Avi Kivity7664e802011-12-11 14:47:25 +02002717 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002718 if (listener->log_global_start) {
2719 listener->log_global_start(listener);
2720 }
Avi Kivity7664e802011-12-11 14:47:25 +02002721 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002722
Paolo Bonzini856d7242013-05-06 11:57:21 +02002723 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002724 FOR_EACH_FLAT_RANGE(fr, view) {
David Hildenbrand279836f2017-10-16 16:43:02 +02002725 MemoryRegionSection section = section_from_flat_range(fr, view);
2726
Avi Kivity975aefe2012-10-02 16:39:57 +02002727 if (listener->region_add) {
2728 listener->region_add(listener, &section);
2729 }
David Hildenbrandae990e62017-10-16 16:42:56 +02002730 if (fr->dirty_log_mask && listener->log_start) {
2731 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2732 }
Avi Kivity7664e802011-12-11 14:47:25 +02002733 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002734 if (listener->commit) {
2735 listener->commit(listener);
2736 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002737 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002738}
2739
Peter Xud25836c2018-01-22 14:02:44 +08002740static void listener_del_address_space(MemoryListener *listener,
2741 AddressSpace *as)
2742{
2743 FlatView *view;
2744 FlatRange *fr;
2745
2746 if (listener->begin) {
2747 listener->begin(listener);
2748 }
2749 view = address_space_get_flatview(as);
2750 FOR_EACH_FLAT_RANGE(fr, view) {
2751 MemoryRegionSection section = section_from_flat_range(fr, view);
2752
2753 if (fr->dirty_log_mask && listener->log_stop) {
2754 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2755 }
2756 if (listener->region_del) {
2757 listener->region_del(listener, &section);
2758 }
2759 }
2760 if (listener->commit) {
2761 listener->commit(listener);
2762 }
2763 flatview_unref(view);
2764}
2765
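/*
 * Both the global and the per-address-space listener lists are kept sorted
 * by ascending priority, so forward iteration runs lower-priority listeners
 * first and reverse iteration runs them last.
 */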
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002766void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002767{
Avi Kivity72e22d22012-02-08 15:05:50 +02002768 MemoryListener *other = NULL;
2769
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002770 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002771 if (QTAILQ_EMPTY(&memory_listeners)
Paolo Bonzinieae3eb32018-12-06 13:10:34 +01002772 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
Avi Kivity72e22d22012-02-08 15:05:50 +02002773 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2774 } else {
2775 QTAILQ_FOREACH(other, &memory_listeners, link) {
2776 if (listener->priority < other->priority) {
2777 break;
2778 }
2779 }
2780 QTAILQ_INSERT_BEFORE(other, listener, link);
2781 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002782
Paolo Bonzini9a546352016-09-22 16:23:06 +02002783 if (QTAILQ_EMPTY(&as->listeners)
Paolo Bonzinieae3eb32018-12-06 13:10:34 +01002784 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
Paolo Bonzini9a546352016-09-22 16:23:06 +02002785 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2786 } else {
2787 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2788 if (listener->priority < other->priority) {
2789 break;
2790 }
2791 }
2792 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2793 }
2794
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002795 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002796}
2797
2798void memory_listener_unregister(MemoryListener *listener)
2799{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002800 if (!listener->address_space) {
2801 return;
2802 }
2803
Peter Xud25836c2018-01-22 14:02:44 +08002804 listener_del_address_space(listener, listener->address_space);
Avi Kivity72e22d22012-02-08 15:05:50 +02002805 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002806 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002807 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002808}
Avi Kivitye2177952011-12-08 15:00:18 +02002809
Greg Kurza2166412019-06-21 11:27:33 +02002810void address_space_remove_listeners(AddressSpace *as)
2811{
2812 while (!QTAILQ_EMPTY(&as->listeners)) {
2813 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
2814 }
2815}
2816
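/*
 * A hedged usage sketch (names illustrative): set up an address space
 * rooted at system memory, and destroy it once it is no longer needed:
 *
 *     AddressSpace as;
 *     address_space_init(&as, get_system_memory(), "example-as");
 *     ...
 *     address_space_destroy(&as);
 */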
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002817void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002818{
Paolo Bonziniac951902015-02-11 15:21:04 +01002819 memory_region_ref(root);
Avi Kivity8786db72012-10-02 13:53:41 +02002820 as->root = root;
Alexey Kardashevskiy67ace392017-09-21 18:51:05 +10002821 as->current_map = NULL;
Avi Kivity4c19eb72012-10-30 13:47:44 +02002822 as->ioeventfd_nb = 0;
2823 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002824 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002825 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002826 as->name = g_strdup(name ? name : "anonymous");
Alexey Kardashevskiy202fc012017-09-21 18:51:09 +10002827 address_space_update_topology(as);
2828 address_space_update_ioeventfds(as);
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002829}
Avi Kivity658b2222011-07-26 14:26:08 +03002830
Paolo Bonzini374f2982013-05-17 12:37:03 +02002831static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002832{
Paolo Bonzini9a546352016-09-22 16:23:06 +02002833 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002834
Paolo Bonzini856d7242013-05-06 11:57:21 +02002835 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002836 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002837 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002838 memory_region_unref(as->root);
Avi Kivity83f3c252012-10-07 12:59:55 +02002839}
2840
Paolo Bonzini374f2982013-05-17 12:37:03 +02002841void address_space_destroy(AddressSpace *as)
2842{
Paolo Bonziniac951902015-02-11 15:21:04 +01002843 MemoryRegion *root = as->root;
2844
Paolo Bonzini374f2982013-05-17 12:37:03 +02002845 /* Flush out anything from MemoryListeners listening in on this */
2846 memory_region_transaction_begin();
2847 as->root = NULL;
2848 memory_region_transaction_commit();
2849 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2850
2851 /* At this point, as->dispatch and as->current_map are dummy
2852 * entries that the guest should never use. Wait for the old
2853 * values to expire before freeing the data.
2854 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002855 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002856 call_rcu(as, do_address_space_destroy, rcu);
2857}
2858
Peter Xu4e831902017-01-16 16:40:04 +08002859static const char *memory_region_type(MemoryRegion *mr)
2860{
Philippe Mathieu-Daudé39fa93c2020-02-24 10:13:00 +01002861 if (mr->alias) {
2862 return memory_region_type(mr->alias);
2863 }
Peter Xu4e831902017-01-16 16:40:04 +08002864 if (memory_region_is_ram_device(mr)) {
2865 return "ramd";
2866 } else if (memory_region_is_romd(mr)) {
2867 return "romd";
2868 } else if (memory_region_is_rom(mr)) {
2869 return "rom";
2870 } else if (memory_region_is_ram(mr)) {
2871 return "ram";
2872 } else {
2873 return "i/o";
2874 }
2875}
2876
Blue Swirl314e2982011-09-11 20:22:05 +00002877typedef struct MemoryRegionList MemoryRegionList;
2878
2879struct MemoryRegionList {
2880 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002881 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002882};
2883
Paolo Bonzinib58deb32018-12-06 11:58:10 +01002884typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002885
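/* Offset of a region's last byte for printing: size - 1, or 0 if the size is zero. */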
Peter Xu4e831902017-01-16 16:40:04 +08002886#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2887 int128_sub((size), int128_one())) : 0)
2888#define MTREE_INDENT " "
2889
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002890static void mtree_expand_owner(const char *label, Object *obj)
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002891{
2892 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2893
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002894 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002895 if (dev && dev->id) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002896 qemu_printf(" id=%s", dev->id);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002897 } else {
Markus Armbrusterddfb0ba2020-05-05 17:29:10 +02002898 char *canonical_path = object_get_canonical_path(obj);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002899 if (canonical_path) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002900 qemu_printf(" path=%s", canonical_path);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002901 g_free(canonical_path);
2902 } else {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002903 qemu_printf(" type=%s", object_get_typename(obj));
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002904 }
2905 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002906 qemu_printf("}");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002907}
2908
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002909static void mtree_print_mr_owner(const MemoryRegion *mr)
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002910{
2911 Object *owner = mr->owner;
2912 Object *parent = memory_region_owner((MemoryRegion *)mr);
2913
2914 if (!owner && !parent) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002915 qemu_printf(" orphan");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002916 return;
2917 }
2918 if (owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002919 mtree_expand_owner("owner", owner);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002920 }
2921 if (parent && parent != owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002922 mtree_expand_owner("parent", parent);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002923 }
2924}
2925
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002926static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002927 hwaddr base,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002928 MemoryRegionListHead *alias_print_queue,
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02002929 bool owner, bool display_disabled)
Blue Swirl314e2982011-09-11 20:22:05 +00002930{
Jan Kiszka9479c572011-09-27 15:00:41 +02002931 MemoryRegionList *new_ml, *ml, *next_ml;
2932 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002933 const MemoryRegion *submr;
2934 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002935 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002936
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002937 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002938 return;
2939 }
2940
Peter Xub31f8412017-03-14 20:56:27 +08002941 cur_start = base + mr->addr;
2942 cur_end = cur_start + MR_SIZE(mr->size);
2943
2944 /*
2945     * Try to detect overflow of the memory region's address range.  This
2946     * should never happen in practice; when it does, print a marker to
2947     * warn whoever is reading the output.
2948 */
2949 if (cur_start < base || cur_end < cur_start) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002950 qemu_printf("[DETECTED OVERFLOW!] ");
Peter Xub31f8412017-03-14 20:56:27 +08002951 }
2952
Blue Swirl314e2982011-09-11 20:22:05 +00002953 if (mr->alias) {
2954 MemoryRegionList *ml;
2955 bool found = false;
2956
2957 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002958 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002959 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002960 found = true;
2961 }
2962 }
2963
2964 if (!found) {
2965 ml = g_new(MemoryRegionList, 1);
2966 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002967 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002968 }
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02002969 if (mr->enabled || display_disabled) {
2970 for (i = 0; i < level; i++) {
2971 qemu_printf(MTREE_INDENT);
2972 }
2973 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2974 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
2975 "-" TARGET_FMT_plx "%s",
2976 cur_start, cur_end,
2977 mr->priority,
2978 mr->nonvolatile ? "nv-" : "",
2979 memory_region_type((MemoryRegion *)mr),
2980 memory_region_name(mr),
2981 memory_region_name(mr->alias),
2982 mr->alias_offset,
2983 mr->alias_offset + MR_SIZE(mr->size),
2984 mr->enabled ? "" : " [disabled]");
2985 if (owner) {
2986 mtree_print_mr_owner(mr);
2987 }
2988 qemu_printf("\n");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002989 }
Blue Swirl314e2982011-09-11 20:22:05 +00002990 } else {
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02002991 if (mr->enabled || display_disabled) {
2992 for (i = 0; i < level; i++) {
2993 qemu_printf(MTREE_INDENT);
2994 }
2995 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2996 " (prio %d, %s%s): %s%s",
2997 cur_start, cur_end,
2998 mr->priority,
2999 mr->nonvolatile ? "nv-" : "",
3000 memory_region_type((MemoryRegion *)mr),
3001 memory_region_name(mr),
3002 mr->enabled ? "" : " [disabled]");
3003 if (owner) {
3004 mtree_print_mr_owner(mr);
3005 }
3006 qemu_printf("\n");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003007 }
Blue Swirl314e2982011-09-11 20:22:05 +00003008 }
Jan Kiszka9479c572011-09-27 15:00:41 +02003009
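    /*
     * Queue subregions for printing in ascending address order; equal
     * addresses sort higher priority first.
     */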
3010 QTAILQ_INIT(&submr_print_queue);
3011
Blue Swirl314e2982011-09-11 20:22:05 +00003012 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02003013 new_ml = g_new(MemoryRegionList, 1);
3014 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003015 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02003016 if (new_ml->mr->addr < ml->mr->addr ||
3017 (new_ml->mr->addr == ml->mr->addr &&
3018 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003019 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02003020 new_ml = NULL;
3021 break;
3022 }
3023 }
3024 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003025 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02003026 }
3027 }
3028
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003029 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003030 mtree_print_mr(ml->mr, level + 1, cur_start,
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02003031 alias_print_queue, owner, display_disabled);
Jan Kiszka9479c572011-09-27 15:00:41 +02003032 }
3033
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003034 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02003035 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00003036 }
3037}
3038
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003039struct FlatViewInfo {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003040 int counter;
3041 bool dispatch_tree;
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003042 bool owner;
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003043 AccelClass *ac;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003044};
3045
3046static void mtree_print_flatview(gpointer key, gpointer value,
3047 gpointer user_data)
Peter Xu57bb40c2017-01-16 16:40:05 +08003048{
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003049 FlatView *view = key;
3050 GArray *fv_address_spaces = value;
3051 struct FlatViewInfo *fvi = user_data;
Peter Xu57bb40c2017-01-16 16:40:05 +08003052 FlatRange *range = &view->ranges[0];
3053 MemoryRegion *mr;
3054 int n = view->nr;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003055 int i;
3056 AddressSpace *as;
3057
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003058 qemu_printf("FlatView #%d\n", fvi->counter);
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003059 ++fvi->counter;
3060
3061 for (i = 0; i < fv_address_spaces->len; ++i) {
3062 as = g_array_index(fv_address_spaces, AddressSpace*, i);
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003063 qemu_printf(" AS \"%s\", root: %s",
3064 as->name, memory_region_name(as->root));
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003065 if (as->root->alias) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003066 qemu_printf(", alias %s", memory_region_name(as->root->alias));
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003067 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003068 qemu_printf("\n");
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003069 }
3070
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003071 qemu_printf(" Root memory region: %s\n",
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003072 view->root ? memory_region_name(view->root) : "(none)");
Peter Xu57bb40c2017-01-16 16:40:05 +08003073
3074 if (n <= 0) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003075 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003076 return;
3077 }
3078
3079 while (n--) {
3080 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01003081 if (range->offset_in_region) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003082 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3083 " (prio %d, %s%s): %s @" TARGET_FMT_plx,
3084 int128_get64(range->addr.start),
3085 int128_get64(range->addr.start)
3086 + MR_SIZE(range->addr.size),
3087 mr->priority,
3088 range->nonvolatile ? "nv-" : "",
3089 range->readonly ? "rom" : memory_region_type(mr),
3090 memory_region_name(mr),
3091 range->offset_in_region);
Paolo Bonzini377a07a2017-03-02 22:49:41 +01003092 } else {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003093 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3094 " (prio %d, %s%s): %s",
3095 int128_get64(range->addr.start),
3096 int128_get64(range->addr.start)
3097 + MR_SIZE(range->addr.size),
3098 mr->priority,
3099 range->nonvolatile ? "nv-" : "",
3100 range->readonly ? "rom" : memory_region_type(mr),
3101 memory_region_name(mr));
Paolo Bonzini377a07a2017-03-02 22:49:41 +01003102 }
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003103 if (fvi->owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003104 mtree_print_mr_owner(mr);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003105 }
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003106
3107 if (fvi->ac) {
3108 for (i = 0; i < fv_address_spaces->len; ++i) {
3109 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3110 if (fvi->ac->has_memory(current_machine, as,
3111 int128_get64(range->addr.start),
3112 MR_SIZE(range->addr.size) + 1)) {
Paolo Bonzini53b62be2019-11-13 11:50:03 +01003113 qemu_printf(" %s", fvi->ac->name);
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003114 }
3115 }
3116 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003117 qemu_printf("\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003118 range++;
3119 }
3120
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003121#if !defined(CONFIG_USER_ONLY)
3122 if (fvi->dispatch_tree && view->root) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003123 mtree_print_dispatch(view->dispatch, view->root);
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003124 }
3125#endif
3126
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003127 qemu_printf("\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003128}
3129
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003130static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3131 gpointer user_data)
3132{
3133 FlatView *view = key;
3134 GArray *fv_address_spaces = value;
3135
3136 g_array_unref(fv_address_spaces);
3137 flatview_unref(view);
3138
3139 return true;
3140}
3141
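/* Back end of the monitor's "info mtree" command. */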
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02003142void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
Blue Swirl314e2982011-09-11 20:22:05 +00003143{
3144 MemoryRegionListHead ml_head;
3145 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02003146 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00003147
Peter Xu57bb40c2017-01-16 16:40:05 +08003148 if (flatview) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003149 FlatView *view;
3150 struct FlatViewInfo fvi = {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003151 .counter = 0,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003152 .dispatch_tree = dispatch_tree,
3153 .owner = owner,
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003154 };
3155 GArray *fv_address_spaces;
3156 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
Philippe Mathieu-Daudé4f7f5892020-01-21 12:03:48 +01003157 AccelClass *ac = ACCEL_GET_CLASS(current_accel());
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003158
3159 if (ac->has_memory) {
3160 fvi.ac = ac;
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003161 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003162
3163 /* Gather all FVs in one table */
Peter Xu57bb40c2017-01-16 16:40:05 +08003164 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003165 view = address_space_get_flatview(as);
3166
3167 fv_address_spaces = g_hash_table_lookup(views, view);
3168 if (!fv_address_spaces) {
3169 fv_address_spaces = g_array_new(false, false, sizeof(as));
3170 g_hash_table_insert(views, view, fv_address_spaces);
3171 }
3172
3173 g_array_append_val(fv_address_spaces, as);
Peter Xu57bb40c2017-01-16 16:40:05 +08003174 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003175
3176 /* Print */
3177 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3178
3179 /* Free */
3180 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3181 g_hash_table_unref(views);
3182
Peter Xu57bb40c2017-01-16 16:40:05 +08003183 return;
3184 }
3185
Blue Swirl314e2982011-09-11 20:22:05 +00003186 QTAILQ_INIT(&ml_head);
3187
Avi Kivity0d673e32012-10-02 15:28:50 +02003188 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003189 qemu_printf("address-space: %s\n", as->name);
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02003190 mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003191 qemu_printf("\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00003192 }
3193
Blue Swirl314e2982011-09-11 20:22:05 +00003194 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003195 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003196 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
Philippe Mathieu-Daudé2261d392020-05-29 14:53:25 +02003197 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003198 qemu_printf("\n");
Blue Swirl314e2982011-09-11 20:22:05 +00003199 }
3200
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003201 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02003202 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00003203 }
Blue Swirl314e2982011-09-11 20:22:05 +00003204}
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003205
Peter Maydellb08199c2017-07-07 15:42:51 +01003206void memory_region_init_ram(MemoryRegion *mr,
3207 struct Object *owner,
3208 const char *name,
3209 uint64_t size,
3210 Error **errp)
3211{
3212 DeviceState *owner_dev;
3213 Error *err = NULL;
3214
3215 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3216 if (err) {
3217 error_propagate(errp, err);
3218 return;
3219 }
3220 /* This will assert if owner is neither NULL nor a DeviceState.
3221 * We only want the owner here for the purposes of defining a
3222 * unique name for migration. TODO: Ideally we should implement
3223 * a naming scheme for Objects which are not DeviceStates, in
3224 * which case we can relax this restriction.
3225 */
3226 owner_dev = DEVICE(owner);
3227 vmstate_register_ram(mr, owner_dev);
3228}
3229
3230void memory_region_init_rom(MemoryRegion *mr,
3231 struct Object *owner,
3232 const char *name,
3233 uint64_t size,
3234 Error **errp)
3235{
3236 DeviceState *owner_dev;
3237 Error *err = NULL;
3238
3239 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3240 if (err) {
3241 error_propagate(errp, err);
3242 return;
3243 }
3244 /* This will assert if owner is neither NULL nor a DeviceState.
3245 * We only want the owner here for the purposes of defining a
3246 * unique name for migration. TODO: Ideally we should implement
3247 * a naming scheme for Objects which are not DeviceStates, in
3248 * which case we can relax this restriction.
3249 */
3250 owner_dev = DEVICE(owner);
3251 vmstate_register_ram(mr, owner_dev);
3252}
3253
3254void memory_region_init_rom_device(MemoryRegion *mr,
3255 struct Object *owner,
3256 const MemoryRegionOps *ops,
3257 void *opaque,
3258 const char *name,
3259 uint64_t size,
3260 Error **errp)
3261{
3262 DeviceState *owner_dev;
3263 Error *err = NULL;
3264
3265 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3266 name, size, &err);
3267 if (err) {
3268 error_propagate(errp, err);
3269 return;
3270 }
3271 /* This will assert if owner is neither NULL nor a DeviceState.
3272 * We only want the owner here for the purposes of defining a
3273 * unique name for migration. TODO: Ideally we should implement
3274 * a naming scheme for Objects which are not DeviceStates, in
3275 * which case we can relax this restriction.
3276 */
3277 owner_dev = DEVICE(owner);
3278 vmstate_register_ram(mr, owner_dev);
3279}
3280
Alexander Bulekove7d32222020-10-23 11:07:34 -04003281/*
3282 * Support softmmu builds with CONFIG_FUZZ: provide a weak stub for the
3283 * fuzz_dma_read_cb callback that a fuzzer's strong definition can override.
3284 */
3285#ifdef CONFIG_FUZZ
3286void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
3287 size_t len,
3288 MemoryRegion *mr,
3289 bool is_write)
3290{
3291}
3292#endif
3293
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003294static const TypeInfo memory_region_info = {
3295 .parent = TYPE_OBJECT,
3296 .name = TYPE_MEMORY_REGION,
Markus Armbruster1b53ecd2019-08-12 07:23:34 +02003297 .class_size = sizeof(MemoryRegionClass),
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003298 .instance_size = sizeof(MemoryRegion),
3299 .instance_init = memory_region_initfn,
3300 .instance_finalize = memory_region_finalize,
3301};
3302
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003303static const TypeInfo iommu_memory_region_info = {
3304 .parent = TYPE_MEMORY_REGION,
3305 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003306 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003307 .instance_size = sizeof(IOMMUMemoryRegion),
3308 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003309 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003310};
3311
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003312static void memory_register_types(void)
3313{
3314 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003315 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003316}
3317
3318type_init(memory_register_types)