/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

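/*
 * Order ioeventfds by address range, then by the (match_data, data) pair,
 * and finally by the EventNotifier pointer, so that the add/del pass below
 * can walk two sorted arrays in lock step.
 */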
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

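/*
 * FlatViews are reference counted and reclaimed through RCU: flatview_ref()
 * fails (returns false) once the count has already dropped to zero, and the
 * final flatview_unref() defers the actual destruction to a grace period.
 */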
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

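/*
 * Two FlatRanges can only be merged if they are contiguous both in guest
 * physical address space and within the underlying MemoryRegion, and carry
 * identical attributes.
 */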
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

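/*
 * Byte-swap @data when the endianness requested by the access (the MO_BSWAP
 * bits of @op) does not match the device endianness declared in mr->ops.
 */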
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

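/*
 * Helpers used by the accessors below to assemble a wide value from several
 * narrower device accesses (read) or to extract one sub-access from a wide
 * value (write). A negative @shift selects the opposite shift direction.
 */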
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

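/*
 * Split an access that is wider (or narrower) than what mr->ops supports
 * into a sequence of accesses of a legal size, invoking @access_fn for each
 * piece and ORing together the MemTxResult of every sub-access.
 */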
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

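/*
 * Walk down from @mr, skipping fully-covering aliases and single enabled
 * children, to find the region that actually determines the flat view.
 * Address spaces whose roots resolve to the same region can then share one
 * FlatView. Returns NULL if @mr is disabled or contains nothing that would
 * appear in the view.
 */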
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

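/*
 * Compare the old and new ioeventfd arrays (both sorted with
 * memory_region_ioeventfd_before) and tell the listeners about entries that
 * appear only on one side: deletions from the old set, additions to the new.
 */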
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'. Only the part that intersects the specified FlatRange
 * will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

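/*
 * Replay every coalesced MMIO range of fr->mr to the listeners, either as
 * deletions (when the FlatRange is about to go away) or as additions (when
 * it has just been installed).
 */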
static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

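/*
 * Walk the old and new FlatViews in parallel (both are sorted by address)
 * and emit region_del/region_add/log_start/log_stop callbacks for the
 * differences. The walk is done twice: once with adding=false to remove
 * stale ranges, once with adding=true to install the new ones.
 */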
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

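/*
 * flat_views maps a root MemoryRegion to its rendered FlatView so that
 * address spaces with the same effective root share a single view. The
 * NULL key holds a permanently-referenced empty view.
 */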
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

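/*
 * Switch @as to the FlatView that was pre-rendered for its root, notifying
 * the address space's listeners about the differences between the old and
 * the new view before publishing the new one with RCU.
 */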
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

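/*
 * Committing the outermost transaction re-renders the FlatViews (when a
 * region change is pending) and/or refreshes ioeventfds, bracketed by the
 * listeners' begin/commit callbacks. Nested commits only drop the depth
 * counter.
 */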
void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

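/*
 * Region names become QOM child property names; characters that would be
 * awkward there ('/', '[', ']', '\') are escaped as "\xNN" sequences.
 */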
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

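/*
 * Instance init for TYPE_MEMORY_REGION: install safe defaults (unassigned
 * ops, enabled, ROMD mode, BQL-protected dispatch, no destructor) and export
 * the read-only "container", "addr", "priority" and "size" properties.
 */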
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

Alex Williamson4a2e2422016-10-31 09:53:03 -06001311static uint64_t memory_region_ram_device_read(void *opaque,
1312 hwaddr addr, unsigned size)
1313{
1314 MemoryRegion *mr = opaque;
1315 uint64_t data = (uint64_t)~0;
1316
1317 switch (size) {
1318 case 1:
1319 data = *(uint8_t *)(mr->ram_block->host + addr);
1320 break;
1321 case 2:
1322 data = *(uint16_t *)(mr->ram_block->host + addr);
1323 break;
1324 case 4:
1325 data = *(uint32_t *)(mr->ram_block->host + addr);
1326 break;
1327 case 8:
1328 data = *(uint64_t *)(mr->ram_block->host + addr);
1329 break;
1330 }
1331
1332 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1333
1334 return data;
1335}
1336
1337static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1338 uint64_t data, unsigned size)
1339{
1340 MemoryRegion *mr = opaque;
1341
1342 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1343
1344 switch (size) {
1345 case 1:
1346 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1347 break;
1348 case 2:
1349 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1350 break;
1351 case 4:
1352 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1353 break;
1354 case 8:
1355 *(uint64_t *)(mr->ram_block->host + addr) = data;
1356 break;
1357 }
1358}
1359
1360static const MemoryRegionOps ram_device_mem_ops = {
1361 .read = memory_region_ram_device_read,
1362 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001363 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001364 .valid = {
1365 .min_access_size = 1,
1366 .max_access_size = 8,
1367 .unaligned = true,
1368 },
1369 .impl = {
1370 .min_access_size = 1,
1371 .max_access_size = 8,
1372 .unaligned = true,
1373 },
1374};
1375
Paolo Bonzinid2702032013-05-24 11:55:06 +02001376bool memory_region_access_valid(MemoryRegion *mr,
1377 hwaddr addr,
1378 unsigned size,
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001379 bool is_write,
1380 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001381{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001382 int access_size_min, access_size_max;
1383 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001384
Avi Kivity093bc2c2011-07-26 14:26:01 +03001385 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1386 return false;
1387 }
1388
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001389 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001390 return true;
1391 }
1392
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001393 access_size_min = mr->ops->valid.min_access_size;
1394 if (!mr->ops->valid.min_access_size) {
1395 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001396 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001397
1398 access_size_max = mr->ops->valid.max_access_size;
1399 if (!mr->ops->valid.max_access_size) {
1400 access_size_max = 4;
1401 }
1402
1403 access_size = MAX(MIN(size, access_size_max), access_size_min);
1404 for (i = 0; i < size; i += access_size) {
1405 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
Peter Maydell8372d382018-05-31 14:50:52 +01001406 is_write, attrs)) {
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001407 return false;
1408 }
1409 }
1410
Avi Kivity093bc2c2011-07-26 14:26:01 +03001411 return true;
1412}
1413
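/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how the clamping above behaves for a hypothetical device whose registers
 * are 4 bytes wide.  "foo_read", "foo_write" and "foo_accepts" are assumed
 * names, not functions defined in this file.
 *
 *   static const MemoryRegionOps foo_ops = {
 *       .read = foo_read,
 *       .write = foo_write,
 *       .endianness = DEVICE_LITTLE_ENDIAN,
 *       .valid = {
 *           .min_access_size = 4,
 *           .max_access_size = 4,
 *           .accepts = foo_accepts,
 *       },
 *   };
 *
 * For an 8-byte access, access_size = MAX(MIN(8, 4), 4) = 4, so
 * foo_accepts() is consulted twice, at addr + 0 and addr + 4, and both
 * calls must return true for the access to be considered valid.
 */
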
Peter Maydellcc05c432015-04-26 16:49:23 +01001414static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1415 hwaddr addr,
1416 uint64_t *pval,
1417 unsigned size,
1418 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001419{
Peter Maydellcc05c432015-04-26 16:49:23 +01001420 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001421
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001422 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001423 return access_with_adjusted_size(addr, pval, size,
1424 mr->ops->impl.min_access_size,
1425 mr->ops->impl.max_access_size,
1426 memory_region_read_accessor,
1427 mr, attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001428 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001429 return access_with_adjusted_size(addr, pval, size,
1430 mr->ops->impl.min_access_size,
1431 mr->ops->impl.max_access_size,
1432 memory_region_read_with_attrs_accessor,
1433 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001434 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001435}
1436
Peter Maydell3b643492015-04-26 16:49:23 +01001437MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1438 hwaddr addr,
1439 uint64_t *pval,
Tony Nguyene67c9042019-08-24 04:36:48 +10001440 MemOp op,
Peter Maydell3b643492015-04-26 16:49:23 +01001441 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001442{
Tony Nguyene67c9042019-08-24 04:36:48 +10001443 unsigned size = memop_size(op);
Peter Maydellcc05c432015-04-26 16:49:23 +01001444 MemTxResult r;
1445
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001446 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001447 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001448 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001449 }
Avi Kivitya621f382012-01-02 13:12:08 +02001450
Peter Maydellcc05c432015-04-26 16:49:23 +01001451 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Tony Nguyen9bf825b2019-08-24 04:36:54 +10001452 adjust_endianness(mr, pval, op);
Peter Maydellcc05c432015-04-26 16:49:23 +01001453 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001454}
1455
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001456/* Return true if an eventfd was signalled */
1457static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1458 hwaddr addr,
1459 uint64_t data,
1460 unsigned size,
1461 MemTxAttrs attrs)
1462{
1463 MemoryRegionIoeventfd ioeventfd = {
1464 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1465 .data = data,
1466 };
1467 unsigned i;
1468
1469 for (i = 0; i < mr->ioeventfd_nb; i++) {
1470 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1471 ioeventfd.e = mr->ioeventfds[i].e;
1472
Tristan Burgess73bb7532018-05-28 23:04:45 -04001473 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001474 event_notifier_set(ioeventfd.e);
1475 return true;
1476 }
1477 }
1478
1479 return false;
1480}
1481
Peter Maydell3b643492015-04-26 16:49:23 +01001482MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1483 hwaddr addr,
1484 uint64_t data,
Tony Nguyene67c9042019-08-24 04:36:48 +10001485 MemOp op,
Peter Maydell3b643492015-04-26 16:49:23 +01001486 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001487{
Tony Nguyene67c9042019-08-24 04:36:48 +10001488 unsigned size = memop_size(op);
1489
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001490 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001491 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001492 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001493 }
1494
Tony Nguyen9bf825b2019-08-24 04:36:54 +10001495 adjust_endianness(mr, &data, op);
Avi Kivitya621f382012-01-02 13:12:08 +02001496
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001497 if ((!kvm_eventfds_enabled()) &&
1498 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1499 return MEMTX_OK;
1500 }
1501
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001502 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001503 return access_with_adjusted_size(addr, &data, size,
1504 mr->ops->impl.min_access_size,
1505 mr->ops->impl.max_access_size,
1506 memory_region_write_accessor, mr,
1507 attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001508 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001509 return
1510 access_with_adjusted_size(addr, &data, size,
1511 mr->ops->impl.min_access_size,
1512 mr->ops->impl.max_access_size,
1513 memory_region_write_with_attrs_accessor,
1514 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001515 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001516}
1517
Avi Kivity093bc2c2011-07-26 14:26:01 +03001518void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001519 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001520 const MemoryRegionOps *ops,
1521 void *opaque,
1522 const char *name,
1523 uint64_t size)
1524{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001525 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001526 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001527 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001528 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001529}
1530
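/*
 * Usage sketch (editorial addition): how a device might create an MMIO
 * region with the function above.  "FooState", "FOO()", "foo_ops", the
 * region name and the 4 KiB size are assumptions; only the calls themselves
 * reflect the API.
 *
 *   FooState *s = FOO(dev);
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &foo_ops, s,
 *                         "foo-mmio", 0x1000);
 *   memory_region_add_subregion(get_system_memory(), 0xfe000000, &s->iomem);
 *
 * Real devices normally hand the region to sysbus_init_mmio() or a PCI BAR
 * helper instead of mapping it into the system address space directly.
 */
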
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001531void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1532 Object *owner,
1533 const char *name,
1534 uint64_t size,
1535 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001536{
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001537 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1538}
1539
1540void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1541 Object *owner,
1542 const char *name,
1543 uint64_t size,
1544 bool share,
1545 Error **errp)
1546{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001547 Error *err = NULL;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001548 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001549 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001550 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001551 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001552 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001553 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001554 if (err) {
1555 mr->size = int128_zero();
1556 object_unparent(OBJECT(mr));
1557 error_propagate(errp, err);
1558 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001559}
1560
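/*
 * Usage sketch (editorial addition): allocating guest RAM with the
 * _nomigrate variant above.  The owner, name, size and mapping address are
 * assumptions for illustration.
 *
 *   MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *
 *   memory_region_init_ram_nomigrate(ram, OBJECT(dev), "foo.ram",
 *                                    64 * MiB, &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, ram);
 *
 * Callers that want the contents migrated normally use
 * memory_region_init_ram(), which also registers the block with vmstate.
 */
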
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001561void memory_region_init_resizeable_ram(MemoryRegion *mr,
1562 Object *owner,
1563 const char *name,
1564 uint64_t size,
1565 uint64_t max_size,
1566 void (*resized)(const char*,
1567 uint64_t length,
1568 void *host),
1569 Error **errp)
1570{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001571 Error *err = NULL;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001572 memory_region_init(mr, owner, name, size);
1573 mr->ram = true;
1574 mr->terminates = true;
1575 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001576 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001577 mr, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001578 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001579 if (err) {
1580 mr->size = int128_zero();
1581 object_unparent(OBJECT(mr));
1582 error_propagate(errp, err);
1583 }
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001584}
1585
Hikaru Nishidad5dbde42018-09-24 21:32:05 +09001586#ifdef CONFIG_POSIX
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001587void memory_region_init_ram_from_file(MemoryRegion *mr,
1588 struct Object *owner,
1589 const char *name,
1590 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001591 uint64_t align,
Junyan Hecbfc0172018-07-18 15:47:58 +08001592 uint32_t ram_flags,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001593 const char *path,
1594 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001595{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001596 Error *err = NULL;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001597 memory_region_init(mr, owner, name, size);
1598 mr->ram = true;
1599 mr->terminates = true;
1600 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001601 mr->align = align;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001602 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001603 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001604 if (err) {
1605 mr->size = int128_zero();
1606 object_unparent(OBJECT(mr));
1607 error_propagate(errp, err);
1608 }
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001609}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001610
1611void memory_region_init_ram_from_fd(MemoryRegion *mr,
1612 struct Object *owner,
1613 const char *name,
1614 uint64_t size,
1615 bool share,
1616 int fd,
1617 Error **errp)
1618{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001619 Error *err = NULL;
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001620 memory_region_init(mr, owner, name, size);
1621 mr->ram = true;
1622 mr->terminates = true;
1623 mr->destructor = memory_region_destructor_ram;
Junyan Hecbfc0172018-07-18 15:47:58 +08001624 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1625 share ? RAM_SHARED : 0,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001626 fd, &err);
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001627 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001628 if (err) {
1629 mr->size = int128_zero();
1630 object_unparent(OBJECT(mr));
1631 error_propagate(errp, err);
1632 }
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001633}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001634#endif
1635
Avi Kivity093bc2c2011-07-26 14:26:01 +03001636void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001637 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001638 const char *name,
1639 uint64_t size,
1640 void *ptr)
1641{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001642 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001643 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001644 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001645 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001646 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001647
1648 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1649 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001650 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001651}
1652
Alex Williamson21e00fa2016-10-31 09:53:03 -06001653void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1654 Object *owner,
1655 const char *name,
1656 uint64_t size,
1657 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301658{
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001659 memory_region_init(mr, owner, name, size);
1660 mr->ram = true;
1661 mr->terminates = true;
Alex Williamson21e00fa2016-10-31 09:53:03 -06001662 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001663 mr->ops = &ram_device_mem_ops;
1664 mr->opaque = mr;
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001665 mr->destructor = memory_region_destructor_ram;
1666 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1667 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1668 assert(ptr != NULL);
1669 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301670}
1671
Avi Kivity093bc2c2011-07-26 14:26:01 +03001672void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001673 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001674 const char *name,
1675 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001676 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001677 uint64_t size)
1678{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001679 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001680 mr->alias = orig;
1681 mr->alias_offset = offset;
1682}
1683
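/*
 * Usage sketch (editorial addition): aliases make part of an existing region
 * visible at another guest-physical address without allocating new storage.
 * The pattern below is loosely modelled on how PC machines split one RAM
 * block around the 4 GiB boundary; the variable names are placeholders.
 *
 *   memory_region_init_alias(ram_above_4g, NULL, "ram-above-4g", ram,
 *                            below_4g_size, above_4g_size);
 *   memory_region_add_subregion(get_system_memory(), 0x100000000ULL,
 *                               ram_above_4g);
 *
 * Accesses to the alias are redirected to ram at alias_offset + addr.
 */
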
Peter Maydellb59821a2017-07-07 15:42:50 +01001684void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1685 struct Object *owner,
1686 const char *name,
1687 uint64_t size,
1688 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001689{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001690 Error *err = NULL;
Peter Maydella1777f72016-07-04 13:06:35 +01001691 memory_region_init(mr, owner, name, size);
1692 mr->ram = true;
1693 mr->readonly = true;
1694 mr->terminates = true;
1695 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001696 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
Peter Maydella1777f72016-07-04 13:06:35 +01001697 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001698 if (err) {
1699 mr->size = int128_zero();
1700 object_unparent(OBJECT(mr));
1701 error_propagate(errp, err);
1702 }
Peter Maydella1777f72016-07-04 13:06:35 +01001703}
1704
Peter Maydellb59821a2017-07-07 15:42:50 +01001705void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1706 Object *owner,
1707 const MemoryRegionOps *ops,
1708 void *opaque,
1709 const char *name,
1710 uint64_t size,
1711 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001712{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001713 Error *err = NULL;
Peter Maydell39e0b032016-07-04 13:06:35 +01001714 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001715 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001716 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001717 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001718 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001719 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001720 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001721 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1722 if (err) {
1723 mr->size = int128_zero();
1724 object_unparent(OBJECT(mr));
1725 error_propagate(errp, err);
1726 }
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001727}
1728
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001729void memory_region_init_iommu(void *_iommu_mr,
1730 size_t instance_size,
1731 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001732 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001733 const char *name,
1734 uint64_t size)
1735{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001736 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001737 struct MemoryRegion *mr;
1738
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001739 object_initialize(_iommu_mr, instance_size, mrtypename);
1740 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001741 memory_region_do_init(mr, owner, name, size);
1742 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001743 mr->terminates = true; /* terminates the search here, then re-forwards via translate() */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001744 QLIST_INIT(&iommu_mr->iommu_notify);
1745 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001746}
1747
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001748static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001749{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001750 MemoryRegion *mr = MEMORY_REGION(obj);
1751
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001752 assert(!mr->container);
1753
1754 /* We know the region is not visible in any address space (it
1755 * does not have a container and cannot be a root either because
1756 * it has no references), so we can blindly clear mr->enabled.
1757 * Using memory_region_set_enabled() instead could trigger a transaction
1758 * and cause an infinite loop.
1759 */
1760 mr->enabled = false;
1761 memory_region_transaction_begin();
1762 while (!QTAILQ_EMPTY(&mr->subregions)) {
1763 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1764 memory_region_del_subregion(mr, subregion);
1765 }
1766 memory_region_transaction_commit();
1767
Avi Kivity545e92e2011-08-08 19:58:48 +03001768 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001769 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001770 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001771 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001772}
1773
Paolo Bonzini803c0812013-05-07 06:59:09 +02001774Object *memory_region_owner(MemoryRegion *mr)
1775{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001776 Object *obj = OBJECT(mr);
1777 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001778}
1779
Paolo Bonzini46637be2013-05-07 09:06:00 +02001780void memory_region_ref(MemoryRegion *mr)
1781{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001782 /* MMIO callbacks most likely will access data that belongs
1783 * to the owner, hence the need to ref/unref the owner whenever
1784 * the memory region is in use.
1785 *
1786 * The memory region is a child of its owner. As long as the
1787 * owner doesn't call unparent itself on the memory region,
1788 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001789 * Memory regions without an owner are supposed to never go away;
1790 * we do not ref/unref them, because doing so would slow down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001791 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001792 if (mr && mr->owner) {
1793 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001794 }
1795}
1796
1797void memory_region_unref(MemoryRegion *mr)
1798{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001799 if (mr && mr->owner) {
1800 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001801 }
1802}
1803
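/*
 * Usage sketch (editorial addition): per the comment above, holding on to a
 * MemoryRegion outside an RCU critical section means pinning its owner via
 * a reference.  memory_region_find() (defined later in this file) already
 * takes that reference for the caller:
 *
 *   MemoryRegionSection sec = memory_region_find(root_mr, addr, len);
 *
 *   if (sec.mr) {
 *       ...use sec.mr, sec.offset_within_region, sec.size...
 *       memory_region_unref(sec.mr);
 *   }
 *
 * Code that obtains a bare MemoryRegion pointer some other way brackets its
 * use with memory_region_ref()/memory_region_unref() in the same fashion.
 */
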
Avi Kivity093bc2c2011-07-26 14:26:01 +03001804uint64_t memory_region_size(MemoryRegion *mr)
1805{
Avi Kivity08dafab2011-10-16 13:19:17 +02001806 if (int128_eq(mr->size, int128_2_64())) {
1807 return UINT64_MAX;
1808 }
1809 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001810}
1811
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001812const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001813{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001814 if (!mr->name) {
1815 ((MemoryRegion *)mr)->name =
1816 object_get_canonical_path_component(OBJECT(mr));
1817 }
Peter Maydell302fa282014-08-19 20:05:46 +01001818 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001819}
1820
Alex Williamson21e00fa2016-10-31 09:53:03 -06001821bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301822{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001823 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301824}
1825
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001826uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001827{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001828 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001829 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001830 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1831 }
1832 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001833}
1834
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001835bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1836{
1837 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1838}
1839
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001840static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001841{
1842 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1843 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001844 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001845
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001846 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001847 flags |= iommu_notifier->notifier_flags;
1848 }
1849
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001850 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1851 imrc->notify_flag_changed(iommu_mr,
1852 iommu_mr->iommu_notify_flags,
1853 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001854 }
1855
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001856 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001857}
1858
Peter Xucdb30812016-09-23 13:02:26 +08001859void memory_region_register_iommu_notifier(MemoryRegion *mr,
1860 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001861{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001862 IOMMUMemoryRegion *iommu_mr;
1863
Jason Wangefcd38c2016-12-30 18:09:17 +08001864 if (mr->alias) {
1865 memory_region_register_iommu_notifier(mr->alias, n);
1866 return;
1867 }
1868
Peter Xucdb30812016-09-23 13:02:26 +08001869 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001870 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001871 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001872 assert(n->start <= n->end);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001873 assert(n->iommu_idx >= 0 &&
1874 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1875
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001876 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1877 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001878}
1879
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001880uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001881{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001882 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1883
1884 if (imrc->get_min_page_size) {
1885 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001886 }
1887 return TARGET_PAGE_SIZE;
1888}
1889
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001890void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001891{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001892 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001893 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001894 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001895 IOMMUTLBEntry iotlb;
1896
Peter Xufaa362e2017-04-07 18:59:11 +08001897 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001898 if (imrc->replay) {
1899 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001900 return;
1901 }
1902
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001903 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001904
David Gibsona788f222015-09-30 12:13:55 +10001905 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Peter Maydell2c91bcf2018-06-15 14:57:16 +01001906 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
David Gibsona788f222015-09-30 12:13:55 +10001907 if (iotlb.perm != IOMMU_NONE) {
1908 n->notify(n, &iotlb);
1909 }
1910
1911 /* If (2^64 - MR size) < granularity, addr += granularity can wrap
1912 * around and loop forever; the check below catches that wraparound. */
1913 if ((addr + granularity) < addr) {
1914 break;
1915 }
1916 }
1917}
1918
Peter Xucdb30812016-09-23 13:02:26 +08001919void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1920 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001921{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001922 IOMMUMemoryRegion *iommu_mr;
1923
Jason Wangefcd38c2016-12-30 18:09:17 +08001924 if (mr->alias) {
1925 memory_region_unregister_iommu_notifier(mr->alias, n);
1926 return;
1927 }
Peter Xucdb30812016-09-23 13:02:26 +08001928 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001929 iommu_mr = IOMMU_MEMORY_REGION(mr);
1930 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001931}
1932
Peter Xubd2bfa42017-04-07 18:59:10 +08001933void memory_region_notify_one(IOMMUNotifier *notifier,
1934 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001935{
Peter Xucdb30812016-09-23 13:02:26 +08001936 IOMMUNotifierFlag request_flags;
Yan Zhao03c71402019-06-25 11:21:18 +08001937 hwaddr entry_end = entry->iova + entry->addr_mask;
Peter Xucdb30812016-09-23 13:02:26 +08001938
Peter Xubd2bfa42017-04-07 18:59:10 +08001939 /*
1940 * Skip the notification if it does not overlap with the
1941 * registered range.
1942 */
Yan Zhao03c71402019-06-25 11:21:18 +08001943 if (notifier->start > entry_end || notifier->end < entry->iova) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001944 return;
1945 }
Peter Xucdb30812016-09-23 13:02:26 +08001946
Yan Zhao03c71402019-06-25 11:21:18 +08001947 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
1948
Peter Xubd2bfa42017-04-07 18:59:10 +08001949 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001950 request_flags = IOMMU_NOTIFIER_MAP;
1951 } else {
1952 request_flags = IOMMU_NOTIFIER_UNMAP;
1953 }
1954
Peter Xubd2bfa42017-04-07 18:59:10 +08001955 if (notifier->notifier_flags & request_flags) {
1956 notifier->notify(notifier, entry);
1957 }
1958}
1959
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001960void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001961 int iommu_idx,
Peter Xubd2bfa42017-04-07 18:59:10 +08001962 IOMMUTLBEntry entry)
1963{
1964 IOMMUNotifier *iommu_notifier;
1965
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001966 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001967
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001968 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001969 if (iommu_notifier->iommu_idx == iommu_idx) {
1970 memory_region_notify_one(iommu_notifier, &entry);
1971 }
Peter Xucdb30812016-09-23 13:02:26 +08001972 }
David Gibson06866572013-05-14 19:13:56 +10001973}
1974
Alexey Kardashevskiyf1334de2018-02-06 11:08:24 -07001975int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1976 enum IOMMUMemoryRegionAttr attr,
1977 void *data)
1978{
1979 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1980
1981 if (!imrc->get_attr) {
1982 return -EINVAL;
1983 }
1984
1985 return imrc->get_attr(iommu_mr, attr, data);
1986}
1987
Peter Maydell21f40202018-06-15 14:57:15 +01001988int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1989 MemTxAttrs attrs)
1990{
1991 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1992
1993 if (!imrc->attrs_to_index) {
1994 return 0;
1995 }
1996
1997 return imrc->attrs_to_index(iommu_mr, attrs);
1998}
1999
2000int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2001{
2002 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2003
2004 if (!imrc->num_indexes) {
2005 return 1;
2006 }
2007
2008 return imrc->num_indexes(iommu_mr);
2009}
2010
Avi Kivity093bc2c2011-07-26 14:26:01 +03002011void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2012{
Avi Kivity5a583342011-07-26 14:26:02 +03002013 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002014 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03002015
Paolo Bonzinidbddac62015-03-23 10:31:53 +01002016 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002017 old_logging = mr->vga_logging_count;
2018 mr->vga_logging_count += log ? 1 : -1;
2019 if (!!old_logging == !!mr->vga_logging_count) {
2020 return;
2021 }
2022
Jan Kiszka59023ef2012-08-23 13:02:30 +02002023 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03002024 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01002025 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002026 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002027}
2028
Avi Kivitya8170e52012-10-23 12:30:10 +02002029void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2030 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002031{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002032 assert(mr->ram_block);
2033 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2034 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01002035 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002036}
2037
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002038static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002039{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002040 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02002041 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002042 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03002043 FlatRange *fr;
2044
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002045 /* If the same address space has multiple log_sync listeners, we
2046 * visit that address space's FlatView multiple times. But because
2047 * log_sync listeners are rare, it's still cheaper than walking each
2048 * address space once.
2049 */
2050 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2051 if (!listener->log_sync) {
2052 continue;
2053 }
2054 as = listener->address_space;
2055 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002056 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002057 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002058 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002059 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02002060 }
Avi Kivity5a583342011-07-26 14:26:02 +03002061 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002062 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03002063 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002064}
2065
Peter Xu077874e2019-06-03 14:50:51 +08002066void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2067 hwaddr len)
2068{
2069 MemoryRegionSection mrs;
2070 MemoryListener *listener;
2071 AddressSpace *as;
2072 FlatView *view;
2073 FlatRange *fr;
2074 hwaddr sec_start, sec_end, sec_size;
2075
2076 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2077 if (!listener->log_clear) {
2078 continue;
2079 }
2080 as = listener->address_space;
2081 view = address_space_get_flatview(as);
2082 FOR_EACH_FLAT_RANGE(fr, view) {
2083 if (!fr->dirty_log_mask || fr->mr != mr) {
2084 /*
2085 * The clear-dirty-bitmap operation only applies to regions
2086 * that have dirty logging enabled in the first place
2087 */
2088 continue;
2089 }
2090
2091 mrs = section_from_flat_range(fr, view);
2092
2093 sec_start = MAX(mrs.offset_within_region, start);
2094 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2095 sec_end = MIN(sec_end, start + len);
2096
2097 if (sec_start >= sec_end) {
2098 /*
2099 * If this memory region section has no intersection
2100 * with the requested range, skip.
2101 */
2102 continue;
2103 }
2104
2105 /* Valid case; shrink the section if needed */
2106 mrs.offset_within_address_space +=
2107 sec_start - mrs.offset_within_region;
2108 mrs.offset_within_region = sec_start;
2109 sec_size = sec_end - sec_start;
2110 mrs.size = int128_make64(sec_size);
2111 listener->log_clear(listener, &mrs);
2112 }
2113 flatview_unref(view);
2114 }
2115}
2116
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002117DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2118 hwaddr addr,
2119 hwaddr size,
2120 unsigned client)
2121{
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002122 DirtyBitmapSnapshot *snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002123 assert(mr->ram_block);
2124 memory_region_sync_dirty_bitmap(mr);
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002125 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2126 memory_global_after_dirty_log_sync();
2127 return snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002128}
2129
2130bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2131 hwaddr addr, hwaddr size)
2132{
2133 assert(mr->ram_block);
2134 return cpu_physical_memory_snapshot_get_dirty(snap,
2135 memory_region_get_ram_addr(mr) + addr, size);
2136}
2137
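/*
 * Usage sketch (editorial addition): display code is the typical user of the
 * snapshot API above, redrawing only lines that were dirtied since the last
 * refresh.  "vram", "stride" and "height" are assumptions.
 *
 *   DirtyBitmapSnapshot *snap;
 *   int y;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(vram, 0, height * stride,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (y = 0; y < height; y++) {
 *       if (memory_region_snapshot_get_dirty(vram, snap, y * stride, stride)) {
 *           ...redraw line y...
 *       }
 *   }
 *   g_free(snap);
 *
 * The dirty bits are copied into the snapshot and cleared in one step, so
 * writes that race with the copy simply show up in the next snapshot.
 */
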
Avi Kivity093bc2c2011-07-26 14:26:01 +03002138void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2139{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002140 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002141 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002142 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002143 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002144 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002145 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002146}
2147
Marc-André Lureauc26763f2018-10-03 15:44:52 +04002148void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2149{
2150 if (mr->nonvolatile != nonvolatile) {
2151 memory_region_transaction_begin();
2152 mr->nonvolatile = nonvolatile;
2153 memory_region_update_pending |= mr->enabled;
2154 memory_region_transaction_commit();
2155 }
2156}
2157
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002158void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002159{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002160 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002161 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002162 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002163 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002164 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002165 }
2166}
2167
Avi Kivitya8170e52012-10-23 12:30:10 +02002168void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2169 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002170{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002171 assert(mr->ram_block);
2172 cpu_physical_memory_test_and_clear_dirty(
2173 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002174}
2175
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002176int memory_region_get_fd(MemoryRegion *mr)
2177{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002178 int fd;
2179
2180 rcu_read_lock();
2181 while (mr->alias) {
2182 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002183 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002184 fd = mr->ram_block->fd;
2185 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002186
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002187 return fd;
2188}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002189
Avi Kivity093bc2c2011-07-26 14:26:01 +03002190void *memory_region_get_ram_ptr(MemoryRegion *mr)
2191{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002192 void *ptr;
2193 uint64_t offset = 0;
2194
2195 rcu_read_lock();
2196 while (mr->alias) {
2197 offset += mr->alias_offset;
2198 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002199 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002200 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002201 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002202 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002203
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002204 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002205}
2206
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002207MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2208{
2209 RAMBlock *block;
2210
2211 block = qemu_ram_block_from_host(ptr, false, offset);
2212 if (!block) {
2213 return NULL;
2214 }
2215
2216 return block->mr;
2217}
2218
Fam Zheng7ebb2742016-03-01 14:18:20 +08002219ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2220{
2221 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2222}
2223
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002224void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2225{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002226 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002227
Gongleifa53a0e2016-05-10 10:04:59 +08002228 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002229}
2230
Peter Xub960fc12019-08-20 22:13:28 +08002231/*
2232 * Notify the proper memory listeners about the change to the
2233 * newly added/removed CoalescedMemoryRange.
2234 */
2235static void memory_region_update_coalesced_range(MemoryRegion *mr,
2236 CoalescedMemoryRange *cmr,
2237 bool add)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002238{
Peter Xub960fc12019-08-20 22:13:28 +08002239 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002240 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002241 FlatRange *fr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002242
Avi Kivity0d673e32012-10-02 15:28:50 +02002243 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Peter Xub960fc12019-08-20 22:13:28 +08002244 view = address_space_get_flatview(as);
2245 FOR_EACH_FLAT_RANGE(fr, view) {
2246 if (fr->mr == mr) {
2247 flat_range_coalesced_io_notify(fr, as, cmr, add);
2248 }
2249 }
2250 flatview_unref(view);
Avi Kivity0d673e32012-10-02 15:28:50 +02002251 }
2252}
2253
Avi Kivity093bc2c2011-07-26 14:26:01 +03002254void memory_region_set_coalescing(MemoryRegion *mr)
2255{
2256 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002257 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002258}
2259
2260void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002261 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002262 uint64_t size)
2263{
Anthony Liguori7267c092011-08-20 22:09:37 -05002264 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002265
Avi Kivity08dafab2011-10-16 13:19:17 +02002266 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002267 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002268 memory_region_update_coalesced_range(mr, cmr, true);
Jan Kiszkad4105152012-08-23 13:02:29 +02002269 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002270}
2271
2272void memory_region_clear_coalescing(MemoryRegion *mr)
2273{
2274 CoalescedMemoryRange *cmr;
Peter Xu9c1aa1c2019-08-20 22:13:27 +08002275
2276 if (QTAILQ_EMPTY(&mr->coalesced)) {
2277 return;
2278 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002279
Jan Kiszkad4105152012-08-23 13:02:29 +02002280 qemu_flush_coalesced_mmio_buffer();
2281 mr->flush_coalesced_mmio = false;
2282
Avi Kivity093bc2c2011-07-26 14:26:01 +03002283 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2284 cmr = QTAILQ_FIRST(&mr->coalesced);
2285 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002286 memory_region_update_coalesced_range(mr, cmr, false);
Anthony Liguori7267c092011-08-20 22:09:37 -05002287 g_free(cmr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002288 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002289}
2290
Jan Kiszkad4105152012-08-23 13:02:29 +02002291void memory_region_set_flush_coalesced(MemoryRegion *mr)
2292{
2293 mr->flush_coalesced_mmio = true;
2294}
2295
2296void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2297{
2298 qemu_flush_coalesced_mmio_buffer();
2299 if (QTAILQ_EMPTY(&mr->coalesced)) {
2300 mr->flush_coalesced_mmio = false;
2301 }
2302}
2303
Jan Kiszka196ea132015-06-18 18:47:20 +02002304void memory_region_clear_global_locking(MemoryRegion *mr)
2305{
2306 mr->global_locking = false;
2307}
2308
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002309static bool userspace_eventfd_warning;
2310
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002311void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002312 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002313 unsigned size,
2314 bool match_data,
2315 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002316 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002317{
2318 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002319 .addr.start = int128_make64(addr),
2320 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002321 .match_data = match_data,
2322 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002323 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002324 };
2325 unsigned i;
2326
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002327 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2328 userspace_eventfd_warning))) {
2329 userspace_eventfd_warning = true;
2330 error_report("Using eventfd without MMIO binding in KVM. "
2331 "Suboptimal performance expected");
2332 }
2333
Jason Wangb8aecea2015-11-06 16:02:45 +08002334 if (size) {
Tony Nguyen9bf825b2019-08-24 04:36:54 +10002335 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
Jason Wangb8aecea2015-11-06 16:02:45 +08002336 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002337 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002338 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002339 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002340 break;
2341 }
2342 }
2343 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002344 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002345 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2346 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2347 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2348 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002349 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002350 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002351}
2352
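/*
 * Usage sketch (editorial addition): a virtio-style doorbell, where a guest
 * write of a specific value to a notify register signals an EventNotifier
 * instead of going through the full MMIO dispatch path.  The offset, access
 * size and matched value are assumptions.
 *
 *   event_notifier_init(&notifier, 0);
 *
 *   // Signal 'notifier' on a 2-byte write of queue_idx to offset 0x10.
 *   memory_region_add_eventfd(&s->iomem, 0x10, 2, true, queue_idx, &notifier);
 *
 * When KVM's in-kernel ioeventfd support is available the match happens in
 * the kernel; otherwise memory_region_dispatch_write_eventfds() above does
 * it at dispatch time.
 */
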
2353void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002354 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002355 unsigned size,
2356 bool match_data,
2357 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002358 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002359{
2360 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002361 .addr.start = int128_make64(addr),
2362 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002363 .match_data = match_data,
2364 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002365 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002366 };
2367 unsigned i;
2368
Jason Wangb8aecea2015-11-06 16:02:45 +08002369 if (size) {
Tony Nguyen9bf825b2019-08-24 04:36:54 +10002370 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
Jason Wangb8aecea2015-11-06 16:02:45 +08002371 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002372 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002373 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002374 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002375 break;
2376 }
2377 }
2378 assert(i != mr->ioeventfd_nb);
2379 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2380 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2381 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002382 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002383 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002384 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002385 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002386}
2387
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002388static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002389{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002390 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002391 MemoryRegion *other;
2392
Jan Kiszka59023ef2012-08-23 13:02:30 +02002393 memory_region_transaction_begin();
2394
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002395 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002396 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002397 if (subregion->priority >= other->priority) {
2398 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2399 goto done;
2400 }
2401 }
2402 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2403done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002404 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002405 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002406}
2407
Peter Crosthwaite05987012014-06-05 23:14:44 -07002408static void memory_region_add_subregion_common(MemoryRegion *mr,
2409 hwaddr offset,
2410 MemoryRegion *subregion)
2411{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002412 assert(!subregion->container);
2413 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002414 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002415 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002416}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002417
2418void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002419 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002420 MemoryRegion *subregion)
2421{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002422 subregion->priority = 0;
2423 memory_region_add_subregion_common(mr, offset, subregion);
2424}
2425
2426void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002427 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002428 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002429 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002430{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002431 subregion->priority = priority;
2432 memory_region_add_subregion_common(mr, offset, subregion);
2433}
2434
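/*
 * Usage sketch (editorial addition): overlapping subregions are resolved by
 * priority, with higher values shadowing lower ones where they intersect.
 * A common pattern is a small MMIO window layered on top of a large RAM
 * region; the names and addresses below are hypothetical.
 *
 *   memory_region_add_subregion_overlap(sysmem, 0x00000000, ram, 0);
 *   memory_region_add_subregion_overlap(sysmem, 0x000a0000, vga_mmio, 1);
 *
 * Guest accesses that fall inside vga_mmio are handled by it; everything
 * else still reaches ram.  Non-overlapping layouts can simply use
 * memory_region_add_subregion(), which is the priority-0 case above.
 */
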
2435void memory_region_del_subregion(MemoryRegion *mr,
2436 MemoryRegion *subregion)
2437{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002438 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002439 assert(subregion->container == mr);
2440 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002441 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002442 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002443 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002444 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002445}
2446
2447void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2448{
2449 if (enabled == mr->enabled) {
2450 return;
2451 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002452 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002453 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002454 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002455 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002456}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002457
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002458void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2459{
2460 Int128 s = int128_make64(size);
2461
2462 if (size == UINT64_MAX) {
2463 s = int128_2_64();
2464 }
2465 if (int128_eq(s, mr->size)) {
2466 return;
2467 }
2468 memory_region_transaction_begin();
2469 mr->size = s;
2470 memory_region_update_pending = true;
2471 memory_region_transaction_commit();
2472}
2473
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002474static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002475{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002476 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002477
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002478 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002479 memory_region_transaction_begin();
2480 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002481 memory_region_del_subregion(container, mr);
2482 mr->container = container;
2483 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002484 memory_region_unref(mr);
2485 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002486 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002487}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002488
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002489void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2490{
2491 if (addr != mr->addr) {
2492 mr->addr = addr;
2493 memory_region_readd_subregion(mr);
2494 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002495}
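
/*
 * Illustrative sketch, not part of the original file: a PCI-BAR-like
 * region is moved simply by updating its address; if it is currently
 * mapped, memory_region_readd_subregion() above re-inserts it atomically.
 */
static G_GNUC_UNUSED void example_move_bar(MemoryRegion *bar, hwaddr new_base)
{
    /* If the BAR is currently mapped it is re-added at the new address. */
    memory_region_set_address(bar, new_base);
}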
2496
Avi Kivitya8170e52012-10-23 12:30:10 +02002497void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002498{
Avi Kivity47033592011-12-04 19:16:50 +02002499 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002500
Jan Kiszka59023ef2012-08-23 13:02:30 +02002501 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002502 return;
2503 }
2504
Jan Kiszka59023ef2012-08-23 13:02:30 +02002505 memory_region_transaction_begin();
2506 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002507 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002508 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002509}
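
/*
 * Illustrative sketch, not part of the original file: an alias acting as a
 * banked window into RAM can be retargeted by changing its offset, as a
 * memory controller's bank-select register handler might do.
 */
static G_GNUC_UNUSED void example_select_window(MemoryRegion *window_alias,
                                                unsigned bank,
                                                uint64_t bank_size)
{
    memory_region_set_alias_offset(window_alias, bank * bank_size);
}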
2510
Igor Mammedova2b257d2014-10-31 16:38:37 +00002511uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2512{
2513 return mr->align;
2514}
2515
Avi Kivitye2177952011-12-08 15:00:18 +02002516static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2517{
2518 const AddrRange *addr = addr_;
2519 const FlatRange *fr = fr_;
2520
2521 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2522 return -1;
2523 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2524 return 1;
2525 }
2526 return 0;
2527}
2528
Paolo Bonzini99e86342013-05-06 10:26:13 +02002529static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002530{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002531 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002532 sizeof(FlatRange), cmp_flatrange_addr);
2533}
2534
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002535bool memory_region_is_mapped(MemoryRegion *mr)
2536{
2537 return mr->container ? true : false;
2538}
2539
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002540/* Same as memory_region_find, but it does not add a reference to the
2541 * returned region. It must be called from an RCU critical section.
2542 */
2543static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2544 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002545{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002546 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002547 MemoryRegion *root;
2548 AddressSpace *as;
2549 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002550 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002551 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002552
Paolo Bonzini73034e92013-05-07 15:48:28 +02002553 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002554 for (root = mr; root->container; ) {
2555 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002556 addr += root->addr;
2557 }
2558
2559 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002560 if (!as) {
2561 return ret;
2562 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002563 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002564
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002565 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002566 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002567 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002568 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002569 }
2570
Paolo Bonzini99e86342013-05-06 10:26:13 +02002571 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002572 --fr;
2573 }
2574
2575 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002576 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002577 range = addrrange_intersection(range, fr->addr);
2578 ret.offset_within_region = fr->offset_in_region;
2579 ret.offset_within_region += int128_get64(int128_sub(range.start,
2580 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002581 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002582 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002583 ret.readonly = fr->readonly;
Marc-André Lureauc26763f2018-10-03 15:44:52 +04002584 ret.nonvolatile = fr->nonvolatile;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002585 return ret;
2586}
2587
2588MemoryRegionSection memory_region_find(MemoryRegion *mr,
2589 hwaddr addr, uint64_t size)
2590{
2591 MemoryRegionSection ret;
2592 rcu_read_lock();
2593 ret = memory_region_find_rcu(mr, addr, size);
2594 if (ret.mr) {
2595 memory_region_ref(ret.mr);
2596 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002597 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002598 return ret;
2599}
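
/*
 * Illustrative sketch, not part of the original file: callers of
 * memory_region_find() must drop the reference it takes on the returned
 * region once they are done inspecting the section.
 */
static G_GNUC_UNUSED bool example_range_is_ram(MemoryRegion *root,
                                               hwaddr addr, uint64_t size)
{
    MemoryRegionSection section = memory_region_find(root, addr, size);
    bool is_ram = section.mr && memory_region_is_ram(section.mr);

    if (section.mr) {
        memory_region_unref(section.mr);
    }
    return is_ram;
}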
2600
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002601bool memory_region_present(MemoryRegion *container, hwaddr addr)
2602{
2603 MemoryRegion *mr;
2604
2605 rcu_read_lock();
2606 mr = memory_region_find_rcu(container, addr, 1).mr;
2607 rcu_read_unlock();
2608 return mr && mr != container;
2609}
2610
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002611void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002612{
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002613 memory_region_sync_dirty_bitmap(NULL);
Avi Kivity7664e802011-12-11 14:47:25 +02002614}
2615
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002616void memory_global_after_dirty_log_sync(void)
2617{
2618 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2619}
2620
Jay Zhou19310762017-07-28 18:28:53 +08002621static VMChangeStateEntry *vmstate_change;
2622
Avi Kivity7664e802011-12-11 14:47:25 +02002623void memory_global_dirty_log_start(void)
2624{
Jay Zhou19310762017-07-28 18:28:53 +08002625 if (vmstate_change) {
2626 qemu_del_vm_change_state_handler(vmstate_change);
2627 vmstate_change = NULL;
2628 }
2629
Avi Kivity7664e802011-12-11 14:47:25 +02002630 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002631
Avi Kivity7376e582012-02-08 21:05:17 +02002632 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002633
Wei Yang39adb532019-04-26 10:09:27 +08002634 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002635 memory_region_transaction_begin();
2636 memory_region_update_pending = true;
2637 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002638}
2639
Jay Zhou19310762017-07-28 18:28:53 +08002640static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002641{
Avi Kivity7664e802011-12-11 14:47:25 +02002642 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002643
Wei Yang39adb532019-04-26 10:09:27 +08002644 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002645 memory_region_transaction_begin();
2646 memory_region_update_pending = true;
2647 memory_region_transaction_commit();
2648
Avi Kivity7376e582012-02-08 21:05:17 +02002649 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002650}
2651
Jay Zhou19310762017-07-28 18:28:53 +08002652static void memory_vm_change_state_handler(void *opaque, int running,
2653 RunState state)
2654{
2655 if (running) {
2656 memory_global_dirty_log_do_stop();
2657
2658 if (vmstate_change) {
2659 qemu_del_vm_change_state_handler(vmstate_change);
2660 vmstate_change = NULL;
2661 }
2662 }
2663}
2664
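/*
 * Note: when the VM is not running, stopping dirty logging is deferred.
 * memory_global_dirty_log_stop() then only installs a VM change state
 * handler, and the real memory_global_dirty_log_do_stop() runs once the
 * VM resumes, so the memslot/listener updates it triggers do not add to
 * the time the guest spends stopped (e.g. at the end of migration).
 */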
2665void memory_global_dirty_log_stop(void)
2666{
2667 if (!runstate_is_running()) {
2668 if (vmstate_change) {
2669 return;
2670 }
2671 vmstate_change = qemu_add_vm_change_state_handler(
2672 memory_vm_change_state_handler, NULL);
2673 return;
2674 }
2675
2676 memory_global_dirty_log_do_stop();
2677}
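
/*
 * Illustrative sketch, not part of the original file: a migration-style
 * dirty tracking pass enables global dirty logging, periodically pulls the
 * dirty bits into the bitmaps, and stops logging when it is finished.
 */
static G_GNUC_UNUSED void example_dirty_tracking_pass(void)
{
    memory_global_dirty_log_start();
    /* ... let the guest run for a while ... */
    memory_global_dirty_log_sync();
    memory_global_after_dirty_log_sync();
    memory_global_dirty_log_stop();
}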
2678
Avi Kivity7664e802011-12-11 14:47:25 +02002679static void listener_add_address_space(MemoryListener *listener,
2680 AddressSpace *as)
2681{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002682 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002683 FlatRange *fr;
2684
Paolo Bonzini680a4782015-11-02 09:23:52 +01002685 if (listener->begin) {
2686 listener->begin(listener);
2687 }
Avi Kivity7664e802011-12-11 14:47:25 +02002688 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002689 if (listener->log_global_start) {
2690 listener->log_global_start(listener);
2691 }
Avi Kivity7664e802011-12-11 14:47:25 +02002692 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002693
Paolo Bonzini856d7242013-05-06 11:57:21 +02002694 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002695 FOR_EACH_FLAT_RANGE(fr, view) {
David Hildenbrand279836f2017-10-16 16:43:02 +02002696 MemoryRegionSection section = section_from_flat_range(fr, view);
2697
Avi Kivity975aefe2012-10-02 16:39:57 +02002698 if (listener->region_add) {
2699 listener->region_add(listener, &section);
2700 }
David Hildenbrandae990e62017-10-16 16:42:56 +02002701 if (fr->dirty_log_mask && listener->log_start) {
2702 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2703 }
Avi Kivity7664e802011-12-11 14:47:25 +02002704 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002705 if (listener->commit) {
2706 listener->commit(listener);
2707 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002708 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002709}
2710
Peter Xud25836c2018-01-22 14:02:44 +08002711static void listener_del_address_space(MemoryListener *listener,
2712 AddressSpace *as)
2713{
2714 FlatView *view;
2715 FlatRange *fr;
2716
2717 if (listener->begin) {
2718 listener->begin(listener);
2719 }
2720 view = address_space_get_flatview(as);
2721 FOR_EACH_FLAT_RANGE(fr, view) {
2722 MemoryRegionSection section = section_from_flat_range(fr, view);
2723
2724 if (fr->dirty_log_mask && listener->log_stop) {
2725 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2726 }
2727 if (listener->region_del) {
2728 listener->region_del(listener, &section);
2729 }
2730 }
2731 if (listener->commit) {
2732 listener->commit(listener);
2733 }
2734 flatview_unref(view);
2735}
2736
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002737void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002738{
Avi Kivity72e22d22012-02-08 15:05:50 +02002739 MemoryListener *other = NULL;
2740
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002741 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002742 if (QTAILQ_EMPTY(&memory_listeners)
Paolo Bonzinieae3eb32018-12-06 13:10:34 +01002743 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
Avi Kivity72e22d22012-02-08 15:05:50 +02002744 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2745 } else {
2746 QTAILQ_FOREACH(other, &memory_listeners, link) {
2747 if (listener->priority < other->priority) {
2748 break;
2749 }
2750 }
2751 QTAILQ_INSERT_BEFORE(other, listener, link);
2752 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002753
Paolo Bonzini9a546352016-09-22 16:23:06 +02002754 if (QTAILQ_EMPTY(&as->listeners)
Paolo Bonzinieae3eb32018-12-06 13:10:34 +01002755 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
Paolo Bonzini9a546352016-09-22 16:23:06 +02002756 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2757 } else {
2758 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2759 if (listener->priority < other->priority) {
2760 break;
2761 }
2762 }
2763 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2764 }
2765
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002766 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002767}
2768
2769void memory_listener_unregister(MemoryListener *listener)
2770{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002771 if (!listener->address_space) {
2772 return;
2773 }
2774
Peter Xud25836c2018-01-22 14:02:44 +08002775 listener_del_address_space(listener, listener->address_space);
Avi Kivity72e22d22012-02-08 15:05:50 +02002776 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002777 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002778 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002779}
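
/*
 * Illustrative sketch, not part of the original file: a minimal listener
 * that logs every section added to the address space it is registered
 * against.  The listener name, callback and priority are hypothetical.
 */
static void example_listener_region_add(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    qemu_printf("region_add: %s @ " TARGET_FMT_plx "\n",
                memory_region_name(section->mr),
                section->offset_within_address_space);
}

static G_GNUC_UNUSED MemoryListener example_listener = {
    .region_add = example_listener_region_add,
    .priority = 10,
};

static G_GNUC_UNUSED void example_register_listener(void)
{
    /* Existing sections are replayed via listener_add_address_space(). */
    memory_listener_register(&example_listener, &address_space_memory);
}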
Avi Kivitye2177952011-12-08 15:00:18 +02002780
Greg Kurza2166412019-06-21 11:27:33 +02002781void address_space_remove_listeners(AddressSpace *as)
2782{
2783 while (!QTAILQ_EMPTY(&as->listeners)) {
2784 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
2785 }
2786}
2787
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002788void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002789{
Paolo Bonziniac951902015-02-11 15:21:04 +01002790 memory_region_ref(root);
Avi Kivity8786db72012-10-02 13:53:41 +02002791 as->root = root;
Alexey Kardashevskiy67ace392017-09-21 18:51:05 +10002792 as->current_map = NULL;
Avi Kivity4c19eb72012-10-30 13:47:44 +02002793 as->ioeventfd_nb = 0;
2794 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002795 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002796 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002797 as->name = g_strdup(name ? name : "anonymous");
Alexey Kardashevskiy202fc012017-09-21 18:51:09 +10002798 address_space_update_topology(as);
2799 address_space_update_ioeventfds(as);
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002800}
Avi Kivity658b2222011-07-26 14:26:08 +03002801
Paolo Bonzini374f2982013-05-17 12:37:03 +02002802static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002803{
Paolo Bonzini9a546352016-09-22 16:23:06 +02002804 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002805
Paolo Bonzini856d7242013-05-06 11:57:21 +02002806 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002807 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002808 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002809 memory_region_unref(as->root);
Avi Kivity83f3c252012-10-07 12:59:55 +02002810}
2811
Paolo Bonzini374f2982013-05-17 12:37:03 +02002812void address_space_destroy(AddressSpace *as)
2813{
Paolo Bonziniac951902015-02-11 15:21:04 +01002814 MemoryRegion *root = as->root;
2815
Paolo Bonzini374f2982013-05-17 12:37:03 +02002816 /* Flush out anything from MemoryListeners listening in on this */
2817 memory_region_transaction_begin();
2818 as->root = NULL;
2819 memory_region_transaction_commit();
2820 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2821
2822 /* At this point, as->dispatch and as->current_map are dummy
2823 * entries that the guest should never use. Wait for the old
2824 * values to expire before freeing the data.
2825 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002826 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002827 call_rcu(as, do_address_space_destroy, rcu);
2828}
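
/*
 * Illustrative sketch, not part of the original file: a device's DMA view
 * is typically created by wrapping a root container in its own address
 * space; destroying it later is RCU-deferred as described above.
 */
static G_GNUC_UNUSED void example_create_dma_as(AddressSpace *as,
                                                MemoryRegion *root)
{
    address_space_init(as, root, "example-dma");
    /* ... the caller can now use address_space_rw() and friends on "as" ... */
}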
2829
Peter Xu4e831902017-01-16 16:40:04 +08002830static const char *memory_region_type(MemoryRegion *mr)
2831{
2832 if (memory_region_is_ram_device(mr)) {
2833 return "ramd";
2834 } else if (memory_region_is_romd(mr)) {
2835 return "romd";
2836 } else if (memory_region_is_rom(mr)) {
2837 return "rom";
2838 } else if (memory_region_is_ram(mr)) {
2839 return "ram";
2840 } else {
2841 return "i/o";
2842 }
2843}
2844
Blue Swirl314e2982011-09-11 20:22:05 +00002845typedef struct MemoryRegionList MemoryRegionList;
2846
2847struct MemoryRegionList {
2848 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002849 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002850};
2851
Paolo Bonzinib58deb32018-12-06 11:58:10 +01002852typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002853
Peter Xu4e831902017-01-16 16:40:04 +08002854#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2855 int128_sub((size), int128_one())) : 0)
2856#define MTREE_INDENT " "
2857
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002858static void mtree_expand_owner(const char *label, Object *obj)
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002859{
2860 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2861
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002862 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002863 if (dev && dev->id) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002864 qemu_printf(" id=%s", dev->id);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002865 } else {
2866 gchar *canonical_path = object_get_canonical_path(obj);
2867 if (canonical_path) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002868 qemu_printf(" path=%s", canonical_path);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002869 g_free(canonical_path);
2870 } else {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002871 qemu_printf(" type=%s", object_get_typename(obj));
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002872 }
2873 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002874 qemu_printf("}");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002875}
2876
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002877static void mtree_print_mr_owner(const MemoryRegion *mr)
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002878{
2879 Object *owner = mr->owner;
2880 Object *parent = memory_region_owner((MemoryRegion *)mr);
2881
2882 if (!owner && !parent) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002883 qemu_printf(" orphan");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002884 return;
2885 }
2886 if (owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002887 mtree_expand_owner("owner", owner);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002888 }
2889 if (parent && parent != owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002890 mtree_expand_owner("parent", parent);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002891 }
2892}
2893
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002894static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002895 hwaddr base,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002896 MemoryRegionListHead *alias_print_queue,
2897 bool owner)
Blue Swirl314e2982011-09-11 20:22:05 +00002898{
Jan Kiszka9479c572011-09-27 15:00:41 +02002899 MemoryRegionList *new_ml, *ml, *next_ml;
2900 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002901 const MemoryRegion *submr;
2902 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002903 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002904
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002905 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002906 return;
2907 }
2908
2909 for (i = 0; i < level; i++) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002910 qemu_printf(MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002911 }
2912
Peter Xub31f8412017-03-14 20:56:27 +08002913 cur_start = base + mr->addr;
2914 cur_end = cur_start + MR_SIZE(mr->size);
2915
    /*
     * Try to detect overflow of the memory region's address range.  This
     * should never happen under normal circumstances; if it does, print a
     * marker so the user looking at the tree is warned.
     */
2921 if (cur_start < base || cur_end < cur_start) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002922 qemu_printf("[DETECTED OVERFLOW!] ");
Peter Xub31f8412017-03-14 20:56:27 +08002923 }
2924
Blue Swirl314e2982011-09-11 20:22:05 +00002925 if (mr->alias) {
2926 MemoryRegionList *ml;
2927 bool found = false;
2928
2929 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002930 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002931 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002932 found = true;
2933 }
2934 }
2935
2936 if (!found) {
2937 ml = g_new(MemoryRegionList, 1);
2938 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002939 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002940 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002941 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2942 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
2943 "-" TARGET_FMT_plx "%s",
2944 cur_start, cur_end,
2945 mr->priority,
2946 mr->nonvolatile ? "nv-" : "",
2947 memory_region_type((MemoryRegion *)mr),
2948 memory_region_name(mr),
2949 memory_region_name(mr->alias),
2950 mr->alias_offset,
2951 mr->alias_offset + MR_SIZE(mr->size),
2952 mr->enabled ? "" : " [disabled]");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002953 if (owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002954 mtree_print_mr_owner(mr);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002955 }
Blue Swirl314e2982011-09-11 20:22:05 +00002956 } else {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002957 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2958 " (prio %d, %s%s): %s%s",
2959 cur_start, cur_end,
2960 mr->priority,
2961 mr->nonvolatile ? "nv-" : "",
2962 memory_region_type((MemoryRegion *)mr),
2963 memory_region_name(mr),
2964 mr->enabled ? "" : " [disabled]");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002965 if (owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002966 mtree_print_mr_owner(mr);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002967 }
Blue Swirl314e2982011-09-11 20:22:05 +00002968 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002969 qemu_printf("\n");
Jan Kiszka9479c572011-09-27 15:00:41 +02002970
2971 QTAILQ_INIT(&submr_print_queue);
2972
Blue Swirl314e2982011-09-11 20:22:05 +00002973 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002974 new_ml = g_new(MemoryRegionList, 1);
2975 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002976 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002977 if (new_ml->mr->addr < ml->mr->addr ||
2978 (new_ml->mr->addr == ml->mr->addr &&
2979 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002980 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002981 new_ml = NULL;
2982 break;
2983 }
2984 }
2985 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002986 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002987 }
2988 }
2989
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002990 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02002991 mtree_print_mr(ml->mr, level + 1, cur_start,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002992 alias_print_queue, owner);
Jan Kiszka9479c572011-09-27 15:00:41 +02002993 }
2994
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002995 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002996 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002997 }
2998}
2999
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003000struct FlatViewInfo {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003001 int counter;
3002 bool dispatch_tree;
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003003 bool owner;
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003004 AccelClass *ac;
3005 const char *ac_name;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003006};
3007
3008static void mtree_print_flatview(gpointer key, gpointer value,
3009 gpointer user_data)
Peter Xu57bb40c2017-01-16 16:40:05 +08003010{
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003011 FlatView *view = key;
3012 GArray *fv_address_spaces = value;
3013 struct FlatViewInfo *fvi = user_data;
Peter Xu57bb40c2017-01-16 16:40:05 +08003014 FlatRange *range = &view->ranges[0];
3015 MemoryRegion *mr;
3016 int n = view->nr;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003017 int i;
3018 AddressSpace *as;
3019
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003020 qemu_printf("FlatView #%d\n", fvi->counter);
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003021 ++fvi->counter;
3022
3023 for (i = 0; i < fv_address_spaces->len; ++i) {
3024 as = g_array_index(fv_address_spaces, AddressSpace*, i);
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003025 qemu_printf(" AS \"%s\", root: %s",
3026 as->name, memory_region_name(as->root));
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003027 if (as->root->alias) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003028 qemu_printf(", alias %s", memory_region_name(as->root->alias));
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003029 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003030 qemu_printf("\n");
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003031 }
3032
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003033 qemu_printf(" Root memory region: %s\n",
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003034 view->root ? memory_region_name(view->root) : "(none)");
Peter Xu57bb40c2017-01-16 16:40:05 +08003035
3036 if (n <= 0) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003037 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003038 return;
3039 }
3040
3041 while (n--) {
3042 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01003043 if (range->offset_in_region) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003044 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3045 " (prio %d, %s%s): %s @" TARGET_FMT_plx,
3046 int128_get64(range->addr.start),
3047 int128_get64(range->addr.start)
3048 + MR_SIZE(range->addr.size),
3049 mr->priority,
3050 range->nonvolatile ? "nv-" : "",
3051 range->readonly ? "rom" : memory_region_type(mr),
3052 memory_region_name(mr),
3053 range->offset_in_region);
Paolo Bonzini377a07a2017-03-02 22:49:41 +01003054 } else {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003055 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3056 " (prio %d, %s%s): %s",
3057 int128_get64(range->addr.start),
3058 int128_get64(range->addr.start)
3059 + MR_SIZE(range->addr.size),
3060 mr->priority,
3061 range->nonvolatile ? "nv-" : "",
3062 range->readonly ? "rom" : memory_region_type(mr),
3063 memory_region_name(mr));
Paolo Bonzini377a07a2017-03-02 22:49:41 +01003064 }
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003065 if (fvi->owner) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003066 mtree_print_mr_owner(mr);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003067 }
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003068
3069 if (fvi->ac) {
3070 for (i = 0; i < fv_address_spaces->len; ++i) {
3071 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3072 if (fvi->ac->has_memory(current_machine, as,
3073 int128_get64(range->addr.start),
3074 MR_SIZE(range->addr.size) + 1)) {
3075 qemu_printf(" %s", fvi->ac_name);
3076 }
3077 }
3078 }
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003079 qemu_printf("\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003080 range++;
3081 }
3082
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003083#if !defined(CONFIG_USER_ONLY)
3084 if (fvi->dispatch_tree && view->root) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003085 mtree_print_dispatch(view->dispatch, view->root);
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003086 }
3087#endif
3088
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003089 qemu_printf("\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003090}
3091
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003092static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3093 gpointer user_data)
3094{
3095 FlatView *view = key;
3096 GArray *fv_address_spaces = value;
3097
3098 g_array_unref(fv_address_spaces);
3099 flatview_unref(view);
3100
3101 return true;
3102}
3103
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003104void mtree_info(bool flatview, bool dispatch_tree, bool owner)
Blue Swirl314e2982011-09-11 20:22:05 +00003105{
3106 MemoryRegionListHead ml_head;
3107 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02003108 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00003109
Peter Xu57bb40c2017-01-16 16:40:05 +08003110 if (flatview) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003111 FlatView *view;
3112 struct FlatViewInfo fvi = {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003113 .counter = 0,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003114 .dispatch_tree = dispatch_tree,
3115 .owner = owner,
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003116 };
3117 GArray *fv_address_spaces;
3118 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
Alexey Kardashevskiy8072aae2019-06-14 11:52:37 +10003119 AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);
3120
3121 if (ac->has_memory) {
3122 fvi.ac = ac;
3123 fvi.ac_name = current_machine->accel ? current_machine->accel :
3124 object_class_get_name(OBJECT_CLASS(ac));
3125 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003126
3127 /* Gather all FVs in one table */
Peter Xu57bb40c2017-01-16 16:40:05 +08003128 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003129 view = address_space_get_flatview(as);
3130
3131 fv_address_spaces = g_hash_table_lookup(views, view);
3132 if (!fv_address_spaces) {
3133 fv_address_spaces = g_array_new(false, false, sizeof(as));
3134 g_hash_table_insert(views, view, fv_address_spaces);
3135 }
3136
3137 g_array_append_val(fv_address_spaces, as);
Peter Xu57bb40c2017-01-16 16:40:05 +08003138 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003139
3140 /* Print */
3141 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3142
3143 /* Free */
3144 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3145 g_hash_table_unref(views);
3146
Peter Xu57bb40c2017-01-16 16:40:05 +08003147 return;
3148 }
3149
Blue Swirl314e2982011-09-11 20:22:05 +00003150 QTAILQ_INIT(&ml_head);
3151
Avi Kivity0d673e32012-10-02 15:28:50 +02003152 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003153 qemu_printf("address-space: %s\n", as->name);
3154 mtree_print_mr(as->root, 1, 0, &ml_head, owner);
3155 qemu_printf("\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00003156 }
3157
Blue Swirl314e2982011-09-11 20:22:05 +00003158 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003159 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Markus Armbrusterb6b71cb2019-04-17 21:17:56 +02003160 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3161 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
3162 qemu_printf("\n");
Blue Swirl314e2982011-09-11 20:22:05 +00003163 }
3164
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003165 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02003166 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00003167 }
Blue Swirl314e2982011-09-11 20:22:05 +00003168}
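
/*
 * Illustrative sketch, not part of the original file: this is roughly what
 * the monitor's "info mtree -f -o" command ends up calling (flat views,
 * no dispatch trees, with owners).
 */
static G_GNUC_UNUSED void example_dump_flatviews_with_owners(void)
{
    mtree_info(true, false, true);
}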
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003169
Peter Maydellb08199c2017-07-07 15:42:51 +01003170void memory_region_init_ram(MemoryRegion *mr,
3171 struct Object *owner,
3172 const char *name,
3173 uint64_t size,
3174 Error **errp)
3175{
3176 DeviceState *owner_dev;
3177 Error *err = NULL;
3178
3179 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3180 if (err) {
3181 error_propagate(errp, err);
3182 return;
3183 }
3184 /* This will assert if owner is neither NULL nor a DeviceState.
3185 * We only want the owner here for the purposes of defining a
3186 * unique name for migration. TODO: Ideally we should implement
3187 * a naming scheme for Objects which are not DeviceStates, in
3188 * which case we can relax this restriction.
3189 */
3190 owner_dev = DEVICE(owner);
3191 vmstate_register_ram(mr, owner_dev);
3192}
3193
3194void memory_region_init_rom(MemoryRegion *mr,
3195 struct Object *owner,
3196 const char *name,
3197 uint64_t size,
3198 Error **errp)
3199{
3200 DeviceState *owner_dev;
3201 Error *err = NULL;
3202
3203 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3204 if (err) {
3205 error_propagate(errp, err);
3206 return;
3207 }
3208 /* This will assert if owner is neither NULL nor a DeviceState.
3209 * We only want the owner here for the purposes of defining a
3210 * unique name for migration. TODO: Ideally we should implement
3211 * a naming scheme for Objects which are not DeviceStates, in
3212 * which case we can relax this restriction.
3213 */
3214 owner_dev = DEVICE(owner);
3215 vmstate_register_ram(mr, owner_dev);
3216}
3217
3218void memory_region_init_rom_device(MemoryRegion *mr,
3219 struct Object *owner,
3220 const MemoryRegionOps *ops,
3221 void *opaque,
3222 const char *name,
3223 uint64_t size,
3224 Error **errp)
3225{
3226 DeviceState *owner_dev;
3227 Error *err = NULL;
3228
3229 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3230 name, size, &err);
3231 if (err) {
3232 error_propagate(errp, err);
3233 return;
3234 }
3235 /* This will assert if owner is neither NULL nor a DeviceState.
3236 * We only want the owner here for the purposes of defining a
3237 * unique name for migration. TODO: Ideally we should implement
3238 * a naming scheme for Objects which are not DeviceStates, in
3239 * which case we can relax this restriction.
3240 */
3241 owner_dev = DEVICE(owner);
3242 vmstate_register_ram(mr, owner_dev);
3243}
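
/*
 * Illustrative sketch, not part of the original file: board code usually
 * lets a device own its RAM so that the migration name registered above is
 * unique.  The device, region name and size are hypothetical.
 */
static G_GNUC_UNUSED void example_add_board_ram(MemoryRegion *system_memory,
                                                DeviceState *dev,
                                                MemoryRegion *ram)
{
    Error *err = NULL;

    memory_region_init_ram(ram, OBJECT(dev), "example.ram",
                           128 * 1024 * 1024, &err);
    if (err) {
        error_report_err(err);
        return;
    }
    memory_region_add_subregion(system_memory, 0, ram);
}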
3244
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003245static const TypeInfo memory_region_info = {
3246 .parent = TYPE_OBJECT,
3247 .name = TYPE_MEMORY_REGION,
Markus Armbruster1b53ecd2019-08-12 07:23:34 +02003248 .class_size = sizeof(MemoryRegionClass),
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003249 .instance_size = sizeof(MemoryRegion),
3250 .instance_init = memory_region_initfn,
3251 .instance_finalize = memory_region_finalize,
3252};
3253
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003254static const TypeInfo iommu_memory_region_info = {
3255 .parent = TYPE_MEMORY_REGION,
3256 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003257 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003258 .instance_size = sizeof(IOMMUMemoryRegion),
3259 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003260 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003261};
3262
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003263static void memory_register_types(void)
3264{
3265 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003266 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003267}
3268
3269type_init(memory_register_types)
Tony Nguyend5d680c2019-08-24 04:36:52 +10003270
3271MemOp devend_memop(enum device_endian end)
3272{
3273 static MemOp conv[] = {
3274 [DEVICE_LITTLE_ENDIAN] = MO_LE,
3275 [DEVICE_BIG_ENDIAN] = MO_BE,
3276 [DEVICE_NATIVE_ENDIAN] = MO_TE,
3277 [DEVICE_HOST_ENDIAN] = 0,
3278 };
3279 switch (end) {
3280 case DEVICE_LITTLE_ENDIAN:
3281 case DEVICE_BIG_ENDIAN:
3282 case DEVICE_NATIVE_ENDIAN:
3283 return conv[end];
3284 default:
3285 g_assert_not_reached();
3286 }
3287}
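
/*
 * Illustrative sketch, not part of the original file: dispatch code can
 * combine the device endianness with an access size (via size_memop() from
 * "exec/memop.h") to build the MemOp handed to the core memory API.
 */
static G_GNUC_UNUSED MemOp example_memop_for_access(enum device_endian end,
                                                    unsigned size_bytes)
{
    return devend_memop(end) | size_memop(size_bytes);
}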