/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

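/*
 * Illustrative example (values hypothetical): intersecting
 * [0x1000, 0x3000) with [0x2000, 0x4000) yields
 * start = max(0x1000, 0x2000) = 0x2000 and
 * end = min(0x3000, 0x4000) = 0x3000, i.e. the range [0x2000, 0x3000)
 * of size 0x1000.  Ranges are half-open: "end" itself is not contained.
 */
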
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

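/*
 * For illustration: address_space_update_topology_pass() below emits
 *
 *     MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
 *     MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
 *
 * i.e. listeners see additions in list order and deletions in reverse
 * list order, so teardown mirrors setup.
 */
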
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e)))) {
        return true;
    }

    return false;
}

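/*
 * memory_region_ioeventfd_before() defines an ordering over the tuple
 * (addr.start, addr.size, match_data, data, e); the linear merge in
 * address_space_add_del_ioeventfds() below relies on both input arrays
 * being sorted under this ordering to diff them in a single pass.
 */
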
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

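/*
 * FlatViews are reference counted and reclaimed through RCU:
 * qatomic_fetch_inc_nonzero() in flatview_ref() fails (returns 0) once
 * the count has dropped to zero, which is why
 * address_space_get_flatview() below retries in a loop, and the final
 * flatview_unref() defers the actual teardown to call_rcu() so that
 * lockless readers still walking the old view remain safe.
 */
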
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

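/*
 * Sketch of a merge (hypothetical values): two FlatRanges mapping
 * [0x0, 0x1000) and [0x1000, 0x2000) of the same MemoryRegion at
 * region offsets 0x0 and 0x1000, with identical attributes, collapse
 * into a single [0x0, 0x2000) range; the references held by the
 * merged-away entries are dropped in the k loop above.
 */
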
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

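/*
 * The shift helpers assemble/disassemble a wide access out of narrower
 * device accesses: on a read, each partial result is masked and OR-ed
 * into *value at bit offset "shift"; on a write, the matching slice is
 * extracted from *value.  shift can go negative when the device's
 * minimum access size exceeds the requested size, in which case the
 * bits move in the opposite direction (see access_with_adjusted_size()
 * below for how shift is computed).
 */
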
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                 (MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t *value,
                                                  unsigned size,
                                                  signed shift,
                                                  uint64_t mask,
                                                  MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

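/*
 * Worked example (hypothetical device): an 8-byte read from a device
 * whose ops allow at most 4-byte accesses is split into two 4-byte
 * calls.  On a little-endian region the partial results land at bit
 * offsets 0 and 32 (shift = i * 8); on a big-endian region the order
 * flips (shift = (size - access_size - i) * 8), i.e. the first call
 * fills bits 32..63.  The MemTxResult bits of all partial accesses are
 * OR-ed together, so any failing slice fails the whole access.
 */
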
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

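/*
 * Because subregions are rendered first (in priority order) and the
 * container itself only fills the remaining gaps, a subregion punches
 * a hole in its container.  Sketch with hypothetical values: a 64K RAM
 * container with a 4K MMIO subregion at offset 0x1000 renders as three
 * FlatRanges: RAM [0x0, 0x1000), MMIO [0x1000, 0x2000), and
 * RAM [0x2000, 0x10000).
 */
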
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

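/*
 * The merge above is the classic sorted-list symmetric difference: an
 * entry present only in the old array triggers eventfd_del, one present
 * only in the new array triggers eventfd_add, and entries that compare
 * neither before nor after each other (i.e. equal under the ordering)
 * are skipped in lockstep because nothing changed for them.
 */
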
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that intersects the specified FlatRange
 * is sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

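/*
 * The update runs as two such passes over the same pair of views:
 * first with adding == false to emit region_del/log_stop for ranges
 * that disappear, then with adding == true to emit
 * region_add/region_nop/log_start for ranges that appear or survive.
 * Both passes rely on FlatView::ranges being sorted by address, using
 * the same two-pointer merge as the ioeventfd diff above.
 */
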
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

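/*
 * Transactions batch topology changes so the flat views are rebuilt
 * once instead of per mutation.  Typical caller pattern (sketch):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_set_address(mr_b, new_addr);
 *     memory_region_transaction_commit();
 *
 * Nested begin/commit pairs are allowed; only the outermost commit
 * (depth reaching zero) triggers the rebuild.
 */
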
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

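/*
 * Example (hypothetical name): "pci/mem" escapes to "pci\x2fmem" --
 * each reserved character ('/', '[', ']', '\\') becomes a four-byte
 * "\xNN" sequence so the name cannot clash with the separators used in
 * QOM canonical paths.
 */
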
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001272static void iommu_memory_region_initfn(Object *obj)
1273{
1274 MemoryRegion *mr = MEMORY_REGION(obj);
1275
1276 mr->is_iommu = true;
1277}
1278
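/*
 * Unassigned memory: reads return 0 and writes are discarded.  The
 * dispatch functions below also fall back to these handlers when an
 * access fails validation, before returning MEMTX_DECODE_ERROR.
 */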
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001279static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1280 unsigned size)
1281{
1282#ifdef DEBUG_UNASSIGNED
1283 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1284#endif
Jan Kiszka68a74392013-09-02 18:43:31 +02001285 return 0;
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001286}
1287
1288static void unassigned_mem_write(void *opaque, hwaddr addr,
1289 uint64_t val, unsigned size)
1290{
1291#ifdef DEBUG_UNASSIGNED
1292 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1293#endif
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001294}
1295
Paolo Bonzinid1970632013-05-24 13:23:38 +02001296static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
Peter Maydell8372d382018-05-31 14:50:52 +01001297 unsigned size, bool is_write,
1298 MemTxAttrs attrs)
Paolo Bonzinid1970632013-05-24 13:23:38 +02001299{
1300 return false;
1301}
1302
1303const MemoryRegionOps unassigned_mem_ops = {
1304 .valid.accepts = unassigned_mem_accepts,
1305 .endianness = DEVICE_NATIVE_ENDIAN,
1306};
1307
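/*
 * ram_device regions (see memory_region_init_ram_device_ptr) are backed
 * by host memory, but guest accesses are routed through these handlers
 * rather than treated as ordinary RAM, so each access touches the
 * device memory once, with the size the guest actually used.
 */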
Alex Williamson4a2e2422016-10-31 09:53:03 -06001308static uint64_t memory_region_ram_device_read(void *opaque,
1309 hwaddr addr, unsigned size)
1310{
1311 MemoryRegion *mr = opaque;
1312 uint64_t data = (uint64_t)~0;
1313
1314 switch (size) {
1315 case 1:
1316 data = *(uint8_t *)(mr->ram_block->host + addr);
1317 break;
1318 case 2:
1319 data = *(uint16_t *)(mr->ram_block->host + addr);
1320 break;
1321 case 4:
1322 data = *(uint32_t *)(mr->ram_block->host + addr);
1323 break;
1324 case 8:
1325 data = *(uint64_t *)(mr->ram_block->host + addr);
1326 break;
1327 }
1328
1329 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1330
1331 return data;
1332}
1333
1334static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1335 uint64_t data, unsigned size)
1336{
1337 MemoryRegion *mr = opaque;
1338
1339 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1340
1341 switch (size) {
1342 case 1:
1343 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1344 break;
1345 case 2:
1346 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1347 break;
1348 case 4:
1349 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1350 break;
1351 case 8:
1352 *(uint64_t *)(mr->ram_block->host + addr) = data;
1353 break;
1354 }
1355}
1356
1357static const MemoryRegionOps ram_device_mem_ops = {
1358 .read = memory_region_ram_device_read,
1359 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001360 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001361 .valid = {
1362 .min_access_size = 1,
1363 .max_access_size = 8,
1364 .unaligned = true,
1365 },
1366 .impl = {
1367 .min_access_size = 1,
1368 .max_access_size = 8,
1369 .unaligned = true,
1370 },
1371};
1372
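/*
 * An access is valid if the region's .valid.accepts callback (when set)
 * accepts it, if it is suitably aligned (unless .valid.unaligned is
 * set), and if its size fits within [min_access_size, max_access_size].
 */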
Paolo Bonzinid2702032013-05-24 11:55:06 +02001373bool memory_region_access_valid(MemoryRegion *mr,
1374 hwaddr addr,
1375 unsigned size,
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001376 bool is_write,
1377 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001378{
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001379 if (mr->ops->valid.accepts
1380 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
Philippe Mathieu-Daudé21786c72020-10-05 17:27:25 +02001381 qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
1382 "0x%" HWADDR_PRIX ", size %u, "
1383 "region '%s', reason: rejected\n",
1384 addr, size, memory_region_name(mr));
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001385 return false;
1386 }
Avi Kivity897fa7c2011-11-13 13:05:27 +02001387
Avi Kivity093bc2c2011-07-26 14:26:01 +03001388 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
Philippe Mathieu-Daudé21786c72020-10-05 17:27:25 +02001389 qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
1390 "0x%" HWADDR_PRIX ", size %u, "
1391 "region '%s', reason: unaligned\n",
1392 addr, size, memory_region_name(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001393 return false;
1394 }
1395
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001396    /* Treat a zero max_access_size as "all sizes valid", for compatibility. */
1397 if (!mr->ops->valid.max_access_size) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001398 return true;
1399 }
1400
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001401 if (size > mr->ops->valid.max_access_size
1402 || size < mr->ops->valid.min_access_size) {
Philippe Mathieu-Daudé21786c72020-10-05 17:27:25 +02001403 qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
1404 "0x%" HWADDR_PRIX ", size %u, "
1405 "region '%s', reason: invalid size "
1406 "(min:%u max:%u)\n",
1407 addr, size, memory_region_name(mr),
1408 mr->ops->valid.min_access_size,
1409 mr->ops->valid.max_access_size);
Michael S. Tsirkin5d971f92020-06-10 09:47:49 -04001410 return false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001411 }
1412 return true;
1413}
1414
Peter Maydellcc05c432015-04-26 16:49:23 +01001415static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1416 hwaddr addr,
1417 uint64_t *pval,
1418 unsigned size,
1419 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001420{
Peter Maydellcc05c432015-04-26 16:49:23 +01001421 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001422
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001423 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001424 return access_with_adjusted_size(addr, pval, size,
1425 mr->ops->impl.min_access_size,
1426 mr->ops->impl.max_access_size,
1427 memory_region_read_accessor,
1428 mr, attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001429 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001430 return access_with_adjusted_size(addr, pval, size,
1431 mr->ops->impl.min_access_size,
1432 mr->ops->impl.max_access_size,
1433 memory_region_read_with_attrs_accessor,
1434 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001435 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001436}
1437
Peter Maydell3b643492015-04-26 16:49:23 +01001438MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1439 hwaddr addr,
1440 uint64_t *pval,
Tony Nguyene67c9042019-08-24 04:36:48 +10001441 MemOp op,
Peter Maydell3b643492015-04-26 16:49:23 +01001442 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001443{
Tony Nguyene67c9042019-08-24 04:36:48 +10001444 unsigned size = memop_size(op);
Peter Maydellcc05c432015-04-26 16:49:23 +01001445 MemTxResult r;
1446
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001447 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001448 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001449 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001450 }
Avi Kivitya621f382012-01-02 13:12:08 +02001451
Peter Maydellcc05c432015-04-26 16:49:23 +01001452 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Tony Nguyen9bf825b2019-08-24 04:36:54 +10001453 adjust_endianness(mr, pval, op);
Peter Maydellcc05c432015-04-26 16:49:23 +01001454 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001455}
1456
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001457/* Return true if an eventfd was signalled */
1458static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1459 hwaddr addr,
1460 uint64_t data,
1461 unsigned size,
1462 MemTxAttrs attrs)
1463{
1464 MemoryRegionIoeventfd ioeventfd = {
1465 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1466 .data = data,
1467 };
1468 unsigned i;
1469
1470 for (i = 0; i < mr->ioeventfd_nb; i++) {
1471 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1472 ioeventfd.e = mr->ioeventfds[i].e;
1473
Tristan Burgess73bb7532018-05-28 23:04:45 -04001474 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001475 event_notifier_set(ioeventfd.e);
1476 return true;
1477 }
1478 }
1479
1480 return false;
1481}
1482
Peter Maydell3b643492015-04-26 16:49:23 +01001483MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1484 hwaddr addr,
1485 uint64_t data,
Tony Nguyene67c9042019-08-24 04:36:48 +10001486 MemOp op,
Peter Maydell3b643492015-04-26 16:49:23 +01001487 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001488{
Tony Nguyene67c9042019-08-24 04:36:48 +10001489 unsigned size = memop_size(op);
1490
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001491 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001492 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001493 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001494 }
1495
Tony Nguyen9bf825b2019-08-24 04:36:54 +10001496 adjust_endianness(mr, &data, op);
Avi Kivitya621f382012-01-02 13:12:08 +02001497
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001498 if ((!kvm_eventfds_enabled()) &&
1499 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1500 return MEMTX_OK;
1501 }
1502
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001503 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001504 return access_with_adjusted_size(addr, &data, size,
1505 mr->ops->impl.min_access_size,
1506 mr->ops->impl.max_access_size,
1507 memory_region_write_accessor, mr,
1508 attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001509 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001510 return
1511 access_with_adjusted_size(addr, &data, size,
1512 mr->ops->impl.min_access_size,
1513 mr->ops->impl.max_access_size,
1514 memory_region_write_with_attrs_accessor,
1515 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001516 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001517}
1518
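/*
 * Illustrative sketch, not code from this file: a device would
 * typically create its MMIO window roughly like this, where
 * MyDeviceState, MY_DEVICE() and my_dev_ops are hypothetical names:
 *
 *     MyDeviceState *s = MY_DEVICE(dev);
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "my-device-mmio", 0x1000);
 *
 * and then expose s->iomem via sysbus_init_mmio() or map it directly
 * with memory_region_add_subregion().
 */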
Avi Kivity093bc2c2011-07-26 14:26:01 +03001519void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001520 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001521 const MemoryRegionOps *ops,
1522 void *opaque,
1523 const char *name,
1524 uint64_t size)
1525{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001526 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001527 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001528 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001529 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001530}
1531
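/*
 * The *_nomigrate constructors below do not register the RAM contents
 * for migration; callers that need migration are expected to arrange it
 * themselves (e.g. with vmstate_register_ram()).
 */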
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001532void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1533 Object *owner,
1534 const char *name,
1535 uint64_t size,
1536 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001537{
David Hildenbrand7f863cb2021-05-10 13:43:18 +02001538 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001539}
1540
David Hildenbrand7f863cb2021-05-10 13:43:18 +02001541void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1542 Object *owner,
1543 const char *name,
1544 uint64_t size,
1545 uint32_t ram_flags,
1546 Error **errp)
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001547{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001548 Error *err = NULL;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001549 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001550 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001551 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001552 mr->destructor = memory_region_destructor_ram;
David Hildenbrandebef62d2021-05-10 13:43:19 +02001553 mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001554 if (err) {
1555 mr->size = int128_zero();
1556 object_unparent(OBJECT(mr));
1557 error_propagate(errp, err);
1558 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001559}
1560
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001561void memory_region_init_resizeable_ram(MemoryRegion *mr,
1562 Object *owner,
1563 const char *name,
1564 uint64_t size,
1565 uint64_t max_size,
1566 void (*resized)(const char*,
1567 uint64_t length,
1568 void *host),
1569 Error **errp)
1570{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001571 Error *err = NULL;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001572 memory_region_init(mr, owner, name, size);
1573 mr->ram = true;
1574 mr->terminates = true;
1575 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001576 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001577 mr, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001578 if (err) {
1579 mr->size = int128_zero();
1580 object_unparent(OBJECT(mr));
1581 error_propagate(errp, err);
1582 }
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001583}
1584
Hikaru Nishidad5dbde42018-09-24 21:32:05 +09001585#ifdef CONFIG_POSIX
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001586void memory_region_init_ram_from_file(MemoryRegion *mr,
Philippe Mathieu-Daudéd32335e2021-02-25 19:20:03 +01001587 Object *owner,
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001588 const char *name,
1589 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001590 uint64_t align,
Junyan Hecbfc0172018-07-18 15:47:58 +08001591 uint32_t ram_flags,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001592 const char *path,
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001593 bool readonly,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001594 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001595{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001596 Error *err = NULL;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001597 memory_region_init(mr, owner, name, size);
1598 mr->ram = true;
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001599 mr->readonly = readonly;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001600 mr->terminates = true;
1601 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001602 mr->align = align;
Stefan Hajnoczi369d6dc2021-01-04 17:13:18 +00001603 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
1604 readonly, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001605 if (err) {
1606 mr->size = int128_zero();
1607 object_unparent(OBJECT(mr));
1608 error_propagate(errp, err);
1609 }
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001610}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001611
1612void memory_region_init_ram_from_fd(MemoryRegion *mr,
Philippe Mathieu-Daudéd32335e2021-02-25 19:20:03 +01001613 Object *owner,
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001614 const char *name,
1615 uint64_t size,
David Hildenbrandd5015b82021-05-10 13:43:17 +02001616 uint32_t ram_flags,
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001617 int fd,
Jagannathan Raman44a4ff32021-01-29 11:46:04 -05001618 ram_addr_t offset,
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001619 Error **errp)
1620{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001621 Error *err = NULL;
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001622 memory_region_init(mr, owner, name, size);
1623 mr->ram = true;
1624 mr->terminates = true;
1625 mr->destructor = memory_region_destructor_ram;
David Hildenbrandd5015b82021-05-10 13:43:17 +02001626 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
1627 false, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001628 if (err) {
1629 mr->size = int128_zero();
1630 object_unparent(OBJECT(mr));
1631 error_propagate(errp, err);
1632 }
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001633}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001634#endif
1635
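/*
 * Pointer-backed RAM: the caller provides, and keeps ownership of, the
 * host memory.  Unlike the allocating constructors above there is no
 * error path here, hence qemu_ram_alloc_from_ptr() with &error_fatal.
 */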
Avi Kivity093bc2c2011-07-26 14:26:01 +03001636void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001637 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001638 const char *name,
1639 uint64_t size,
1640 void *ptr)
1641{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001642 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001643 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001644 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001645 mr->destructor = memory_region_destructor_ram;
Hu Taoef701d72014-09-09 13:27:54 +08001646
1647 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1648 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001649 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001650}
1651
Alex Williamson21e00fa2016-10-31 09:53:03 -06001652void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1653 Object *owner,
1654 const char *name,
1655 uint64_t size,
1656 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301657{
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001658 memory_region_init(mr, owner, name, size);
1659 mr->ram = true;
1660 mr->terminates = true;
Alex Williamson21e00fa2016-10-31 09:53:03 -06001661 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001662 mr->ops = &ram_device_mem_ops;
1663 mr->opaque = mr;
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001664 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini0a2949e2020-10-28 03:52:01 -04001665
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001666 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1667 assert(ptr != NULL);
1668 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301669}
1670
Avi Kivity093bc2c2011-07-26 14:26:01 +03001671void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001672 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001673 const char *name,
1674 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001675 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001676 uint64_t size)
1677{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001678 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001679 mr->alias = orig;
1680 mr->alias_offset = offset;
1681}
1682
Peter Maydellb59821a2017-07-07 15:42:50 +01001683void memory_region_init_rom_nomigrate(MemoryRegion *mr,
Philippe Mathieu-Daudéd32335e2021-02-25 19:20:03 +01001684 Object *owner,
Peter Maydellb59821a2017-07-07 15:42:50 +01001685 const char *name,
1686 uint64_t size,
1687 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001688{
David Hildenbrand7f863cb2021-05-10 13:43:18 +02001689 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
Peter Maydella1777f72016-07-04 13:06:35 +01001690 mr->readonly = true;
Peter Maydella1777f72016-07-04 13:06:35 +01001691}
1692
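/*
 * ROM devices: while mr->romd_mode is set (the default), reads are
 * satisfied directly from the RAM backing; once a caller clears it with
 * memory_region_rom_device_set_romd(), reads go through the supplied
 * ops as well.  Writes always go through the ops.
 */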
Peter Maydellb59821a2017-07-07 15:42:50 +01001693void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1694 Object *owner,
1695 const MemoryRegionOps *ops,
1696 void *opaque,
1697 const char *name,
1698 uint64_t size,
1699 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001700{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001701 Error *err = NULL;
Peter Maydell39e0b032016-07-04 13:06:35 +01001702 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001703 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001704 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001705 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001706 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001707 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001708 mr->destructor = memory_region_destructor_ram;
David Hildenbrandebef62d2021-05-10 13:43:19 +02001709 mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001710 if (err) {
1711 mr->size = int128_zero();
1712 object_unparent(OBJECT(mr));
1713 error_propagate(errp, err);
1714 }
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001715}
1716
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001717void memory_region_init_iommu(void *_iommu_mr,
1718 size_t instance_size,
1719 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001720 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001721 const char *name,
1722 uint64_t size)
1723{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001724 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001725 struct MemoryRegion *mr;
1726
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001727 object_initialize(_iommu_mr, instance_size, mrtypename);
1728 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001729 memory_region_do_init(mr, owner, name, size);
1730 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001731 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001732 QLIST_INIT(&iommu_mr->iommu_notify);
1733 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001734}
1735
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001736static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001737{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001738 MemoryRegion *mr = MEMORY_REGION(obj);
1739
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001740 assert(!mr->container);
1741
 1742    /* We know the region is not visible in any address space (it
 1743     * does not have a container, and cannot be a root either because
 1744     * it has no references), so we can blindly clear mr->enabled.
 1745     * Calling memory_region_set_enabled instead could trigger a
 1746     * transaction and cause an infinite loop.
 1747     */
1748 mr->enabled = false;
1749 memory_region_transaction_begin();
1750 while (!QTAILQ_EMPTY(&mr->subregions)) {
1751 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1752 memory_region_del_subregion(mr, subregion);
1753 }
1754 memory_region_transaction_commit();
1755
Avi Kivity545e92e2011-08-08 19:58:48 +03001756 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001757 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001758 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001759 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001760}
1761
Paolo Bonzini803c0812013-05-07 06:59:09 +02001762Object *memory_region_owner(MemoryRegion *mr)
1763{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001764 Object *obj = OBJECT(mr);
1765 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001766}
1767
Paolo Bonzini46637be2013-05-07 09:06:00 +02001768void memory_region_ref(MemoryRegion *mr)
1769{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001770 /* MMIO callbacks most likely will access data that belongs
1771 * to the owner, hence the need to ref/unref the owner whenever
1772 * the memory region is in use.
1773 *
1774 * The memory region is a child of its owner. As long as the
1775 * owner doesn't call unparent itself on the memory region,
1776 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001777 * Memory regions without an owner are supposed to never go away;
 1778     * we do not ref/unref them, because doing so would noticeably slow down DMA.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001779 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001780 if (mr && mr->owner) {
1781 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001782 }
1783}
1784
1785void memory_region_unref(MemoryRegion *mr)
1786{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001787 if (mr && mr->owner) {
1788 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001789 }
1790}
1791
Avi Kivity093bc2c2011-07-26 14:26:01 +03001792uint64_t memory_region_size(MemoryRegion *mr)
1793{
Avi Kivity08dafab2011-10-16 13:19:17 +02001794 if (int128_eq(mr->size, int128_2_64())) {
1795 return UINT64_MAX;
1796 }
1797 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001798}
1799
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001800const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001801{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001802 if (!mr->name) {
1803 ((MemoryRegion *)mr)->name =
Markus Armbruster7a309cc2020-07-14 18:02:00 +02001804 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001805 }
Peter Maydell302fa282014-08-19 20:05:46 +01001806 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001807}
1808
Alex Williamson21e00fa2016-10-31 09:53:03 -06001809bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301810{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001811 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301812}
1813
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001814uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001815{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001816 uint8_t mask = mr->dirty_log_mask;
Zenghui Yu1370d612020-11-16 21:22:10 +08001817 RAMBlock *rb = mr->ram_block;
1818
1819 if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) ||
1820 memory_region_is_iommu(mr))) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001821 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1822 }
Paolo Bonzini0a2949e2020-10-28 03:52:01 -04001823
1824 if (tcg_enabled() && rb) {
1825 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
1826 mask |= (1 << DIRTY_MEMORY_CODE);
1827 }
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001828 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001829}
1830
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001831bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1832{
1833 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1834}
1835
Eric Auger549d40052019-09-24 10:25:17 +02001836static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
1837 Error **errp)
Peter Xu5bf3d312016-09-23 13:02:27 +08001838{
1839 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1840 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001841 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Eric Auger549d40052019-09-24 10:25:17 +02001842 int ret = 0;
Peter Xu5bf3d312016-09-23 13:02:27 +08001843
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001844 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001845 flags |= iommu_notifier->notifier_flags;
1846 }
1847
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001848 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
Eric Auger549d40052019-09-24 10:25:17 +02001849 ret = imrc->notify_flag_changed(iommu_mr,
1850 iommu_mr->iommu_notify_flags,
1851 flags, errp);
Peter Xu5bf3d312016-09-23 13:02:27 +08001852 }
1853
Eric Auger549d40052019-09-24 10:25:17 +02001854 if (!ret) {
1855 iommu_mr->iommu_notify_flags = flags;
1856 }
1857 return ret;
Peter Xu5bf3d312016-09-23 13:02:27 +08001858}
1859
Bharat Bhushan457f8cb2020-10-30 19:05:07 +01001860int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1861 uint64_t page_size_mask,
1862 Error **errp)
1863{
1864 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1865 int ret = 0;
1866
1867 if (imrc->iommu_set_page_size_mask) {
1868 ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
1869 }
1870 return ret;
1871}
1872
Eric Auger549d40052019-09-24 10:25:17 +02001873int memory_region_register_iommu_notifier(MemoryRegion *mr,
1874 IOMMUNotifier *n, Error **errp)
David Gibson06866572013-05-14 19:13:56 +10001875{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001876 IOMMUMemoryRegion *iommu_mr;
Eric Auger549d40052019-09-24 10:25:17 +02001877 int ret;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001878
Jason Wangefcd38c2016-12-30 18:09:17 +08001879 if (mr->alias) {
Eric Auger549d40052019-09-24 10:25:17 +02001880 return memory_region_register_iommu_notifier(mr->alias, n, errp);
Jason Wangefcd38c2016-12-30 18:09:17 +08001881 }
1882
Peter Xucdb30812016-09-23 13:02:26 +08001883 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001884 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001885 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001886 assert(n->start <= n->end);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001887 assert(n->iommu_idx >= 0 &&
1888 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1889
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001890 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
Eric Auger549d40052019-09-24 10:25:17 +02001891 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
1892 if (ret) {
1893 QLIST_REMOVE(n, node);
1894 }
1895 return ret;
David Gibson06866572013-05-14 19:13:56 +10001896}
1897
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001898uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001899{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001900 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1901
1902 if (imrc->get_min_page_size) {
1903 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001904 }
1905 return TARGET_PAGE_SIZE;
1906}
1907
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001908void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001909{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001910 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001911 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001912 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001913 IOMMUTLBEntry iotlb;
1914
Peter Xufaa362e2017-04-07 18:59:11 +08001915 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001916 if (imrc->replay) {
1917 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001918 return;
1919 }
1920
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001921 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001922
David Gibsona788f222015-09-30 12:13:55 +10001923 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Peter Maydell2c91bcf2018-06-15 14:57:16 +01001924 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
David Gibsona788f222015-09-30 12:13:55 +10001925 if (iotlb.perm != IOMMU_NONE) {
1926 n->notify(n, &iotlb);
1927 }
1928
 1929        /* If (2^64 - MR size) < granularity, it's possible to get an
 1930         * infinite loop here.  This check catches such a wraparound. */
1931 if ((addr + granularity) < addr) {
1932 break;
1933 }
1934 }
1935}
1936
Peter Xucdb30812016-09-23 13:02:26 +08001937void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1938 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001939{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001940 IOMMUMemoryRegion *iommu_mr;
1941
Jason Wangefcd38c2016-12-30 18:09:17 +08001942 if (mr->alias) {
1943 memory_region_unregister_iommu_notifier(mr->alias, n);
1944 return;
1945 }
Peter Xucdb30812016-09-23 13:02:26 +08001946 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001947 iommu_mr = IOMMU_MEMORY_REGION(mr);
Eric Auger549d40052019-09-24 10:25:17 +02001948 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
David Gibson06866572013-05-14 19:13:56 +10001949}
1950
Eugenio Pérez3b5ebf82020-11-16 17:55:02 +01001951void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001952 IOMMUTLBEvent *event)
David Gibson06866572013-05-14 19:13:56 +10001953{
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001954 IOMMUTLBEntry *entry = &event->entry;
Yan Zhao03c71402019-06-25 11:21:18 +08001955 hwaddr entry_end = entry->iova + entry->addr_mask;
Eugenio Pérez18048572020-11-16 17:55:06 +01001956 IOMMUTLBEntry tmp = *entry;
Peter Xucdb30812016-09-23 13:02:26 +08001957
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001958 if (event->type == IOMMU_NOTIFIER_UNMAP) {
1959 assert(entry->perm == IOMMU_NONE);
1960 }
1961
Peter Xubd2bfa42017-04-07 18:59:10 +08001962 /*
1963 * Skip the notification if the notification does not overlap
1964 * with registered range.
1965 */
Yan Zhao03c71402019-06-25 11:21:18 +08001966 if (notifier->start > entry_end || notifier->end < entry->iova) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001967 return;
1968 }
Peter Xucdb30812016-09-23 13:02:26 +08001969
Eugenio Pérez18048572020-11-16 17:55:06 +01001970 if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1971 /* Crop (iova, addr_mask) to range */
1972 tmp.iova = MAX(tmp.iova, notifier->start);
1973 tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
1974 } else {
1975 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
1976 }
Yan Zhao03c71402019-06-25 11:21:18 +08001977
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001978 if (event->type & notifier->notifier_flags) {
Eugenio Pérez18048572020-11-16 17:55:06 +01001979 notifier->notify(notifier, &tmp);
Peter Xubd2bfa42017-04-07 18:59:10 +08001980 }
1981}
1982
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001983void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001984 int iommu_idx,
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001985 IOMMUTLBEvent event)
Peter Xubd2bfa42017-04-07 18:59:10 +08001986{
1987 IOMMUNotifier *iommu_notifier;
1988
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001989 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001990
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001991 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001992 if (iommu_notifier->iommu_idx == iommu_idx) {
Eugenio Pérez5039caf2020-11-16 17:55:03 +01001993 memory_region_notify_iommu_one(iommu_notifier, &event);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001994 }
Peter Xucdb30812016-09-23 13:02:26 +08001995 }
David Gibson06866572013-05-14 19:13:56 +10001996}
1997
Alexey Kardashevskiyf1334de2018-02-06 11:08:24 -07001998int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1999 enum IOMMUMemoryRegionAttr attr,
2000 void *data)
2001{
2002 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2003
2004 if (!imrc->get_attr) {
2005 return -EINVAL;
2006 }
2007
2008 return imrc->get_attr(iommu_mr, attr, data);
2009}
2010
Peter Maydell21f40202018-06-15 14:57:15 +01002011int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2012 MemTxAttrs attrs)
2013{
2014 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2015
2016 if (!imrc->attrs_to_index) {
2017 return 0;
2018 }
2019
2020 return imrc->attrs_to_index(iommu_mr, attrs);
2021}
2022
2023int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2024{
2025 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2026
2027 if (!imrc->num_indexes) {
2028 return 1;
2029 }
2030
2031 return imrc->num_indexes(iommu_mr);
2032}
2033
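/*
 * RamDiscardManager plumbing: the helpers below dispatch to the
 * RamDiscardManagerClass of the manager optionally attached to a RAM
 * region, e.g. to track which parts of the region are populated.
 */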
David Hildenbrand8947d7f2021-04-13 11:55:19 +02002034RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
2035{
2036 if (!memory_region_is_mapped(mr) || !memory_region_is_ram(mr)) {
2037 return NULL;
2038 }
2039 return mr->rdm;
2040}
2041
2042void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2043 RamDiscardManager *rdm)
2044{
2045 g_assert(memory_region_is_ram(mr) && !memory_region_is_mapped(mr));
2046 g_assert(!rdm || !mr->rdm);
2047 mr->rdm = rdm;
2048}
2049
2050uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
2051 const MemoryRegion *mr)
2052{
2053 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2054
2055 g_assert(rdmc->get_min_granularity);
2056 return rdmc->get_min_granularity(rdm, mr);
2057}
2058
2059bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
2060 const MemoryRegionSection *section)
2061{
2062 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2063
2064 g_assert(rdmc->is_populated);
2065 return rdmc->is_populated(rdm, section);
2066}
2067
2068int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
2069 MemoryRegionSection *section,
2070 ReplayRamPopulate replay_fn,
2071 void *opaque)
2072{
2073 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2074
2075 g_assert(rdmc->replay_populated);
2076 return rdmc->replay_populated(rdm, section, replay_fn, opaque);
2077}
2078
2079void ram_discard_manager_register_listener(RamDiscardManager *rdm,
2080 RamDiscardListener *rdl,
2081 MemoryRegionSection *section)
2082{
2083 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2084
2085 g_assert(rdmc->register_listener);
2086 rdmc->register_listener(rdm, rdl, section);
2087}
2088
2089void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
2090 RamDiscardListener *rdl)
2091{
2092 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2093
2094 g_assert(rdmc->unregister_listener);
2095 rdmc->unregister_listener(rdm, rdl);
2096}
2097
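/*
 * Only the VGA dirty-memory client goes through this interface today
 * (see the assert below); enabling is refcounted via vga_logging_count
 * so nested enable/disable pairs behave.
 */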
Avi Kivity093bc2c2011-07-26 14:26:01 +03002098void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2099{
Avi Kivity5a583342011-07-26 14:26:02 +03002100 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002101 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03002102
Paolo Bonzinidbddac62015-03-23 10:31:53 +01002103 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002104 old_logging = mr->vga_logging_count;
2105 mr->vga_logging_count += log ? 1 : -1;
2106 if (!!old_logging == !!mr->vga_logging_count) {
2107 return;
2108 }
2109
Jan Kiszka59023ef2012-08-23 13:02:30 +02002110 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03002111 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01002112 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002113 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002114}
2115
Avi Kivitya8170e52012-10-23 12:30:10 +02002116void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2117 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002118{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002119 assert(mr->ram_block);
2120 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2121 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01002122 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002123}
2124
Peter Xub87eaa92021-05-06 12:05:40 -04002125/*
2126 * If memory region `mr' is NULL, do global sync. Otherwise, sync
2127 * dirty bitmap for the specified memory region.
2128 */
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002129static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002130{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002131 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02002132 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002133 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03002134 FlatRange *fr;
2135
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002136 /* If the same address space has multiple log_sync listeners, we
2137 * visit that address space's FlatView multiple times. But because
2138 * log_sync listeners are rare, it's still cheaper than walking each
2139 * address space once.
2140 */
2141 QTAILQ_FOREACH(listener, &memory_listeners, link) {
Peter Xub87eaa92021-05-06 12:05:40 -04002142 if (listener->log_sync) {
2143 as = listener->address_space;
2144 view = address_space_get_flatview(as);
2145 FOR_EACH_FLAT_RANGE(fr, view) {
2146 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2147 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2148 listener->log_sync(listener, &mrs);
2149 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002150 }
Peter Xub87eaa92021-05-06 12:05:40 -04002151 flatview_unref(view);
2152 } else if (listener->log_sync_global) {
2153 /*
 2154             * Whether or not an MR is specified, all we can do here
 2155             * is a global sync, because this listener cannot sync
 2156             * at a finer granularity.
2157 */
2158 listener->log_sync_global(listener);
Avi Kivity5a583342011-07-26 14:26:02 +03002159 }
2160 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002161}
2162
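/*
 * Ask every log_clear listener to clear its dirty bitmap for the range
 * [start, start + len) of this region, shrinking each notified section
 * to the part that actually intersects the range.
 */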
Peter Xu077874e2019-06-03 14:50:51 +08002163void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2164 hwaddr len)
2165{
2166 MemoryRegionSection mrs;
2167 MemoryListener *listener;
2168 AddressSpace *as;
2169 FlatView *view;
2170 FlatRange *fr;
2171 hwaddr sec_start, sec_end, sec_size;
2172
2173 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2174 if (!listener->log_clear) {
2175 continue;
2176 }
2177 as = listener->address_space;
2178 view = address_space_get_flatview(as);
2179 FOR_EACH_FLAT_RANGE(fr, view) {
2180 if (!fr->dirty_log_mask || fr->mr != mr) {
2181 /*
 2182                 * The clear-dirty-bitmap operation only applies to
 2183                 * regions that currently have dirty logging enabled
2184 */
2185 continue;
2186 }
2187
2188 mrs = section_from_flat_range(fr, view);
2189
2190 sec_start = MAX(mrs.offset_within_region, start);
2191 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2192 sec_end = MIN(sec_end, start + len);
2193
2194 if (sec_start >= sec_end) {
2195 /*
2196 * If this memory region section has no intersection
2197 * with the requested range, skip.
2198 */
2199 continue;
2200 }
2201
2202 /* Valid case; shrink the section if needed */
2203 mrs.offset_within_address_space +=
2204 sec_start - mrs.offset_within_region;
2205 mrs.offset_within_region = sec_start;
2206 sec_size = sec_end - sec_start;
2207 mrs.size = int128_make64(sec_size);
2208 listener->log_clear(listener, &mrs);
2209 }
2210 flatview_unref(view);
2211 }
2212}
2213
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002214DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2215 hwaddr addr,
2216 hwaddr size,
2217 unsigned client)
2218{
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002219 DirtyBitmapSnapshot *snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002220 assert(mr->ram_block);
2221 memory_region_sync_dirty_bitmap(mr);
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002222 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2223 memory_global_after_dirty_log_sync();
2224 return snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002225}
2226
2227bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2228 hwaddr addr, hwaddr size)
2229{
2230 assert(mr->ram_block);
2231 return cpu_physical_memory_snapshot_get_dirty(snap,
2232 memory_region_get_ram_addr(mr) + addr, size);
2233}
2234
Avi Kivity093bc2c2011-07-26 14:26:01 +03002235void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2236{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002237 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002238 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002239 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002240 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002241 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002242 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002243}
2244
Marc-André Lureauc26763f2018-10-03 15:44:52 +04002245void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2246{
2247 if (mr->nonvolatile != nonvolatile) {
2248 memory_region_transaction_begin();
2249 mr->nonvolatile = nonvolatile;
2250 memory_region_update_pending |= mr->enabled;
2251 memory_region_transaction_commit();
2252 }
2253}
2254
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002255void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002256{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002257 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002258 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002259 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002260 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002261 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002262 }
2263}
2264
Avi Kivitya8170e52012-10-23 12:30:10 +02002265void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2266 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002267{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002268 assert(mr->ram_block);
2269 cpu_physical_memory_test_and_clear_dirty(
2270 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002271}
2272
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002273int memory_region_get_fd(MemoryRegion *mr)
2274{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002275 int fd;
2276
Dr. David Alan Gilbert694ea272019-10-07 15:36:41 +01002277 RCU_READ_LOCK_GUARD();
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002278 while (mr->alias) {
2279 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002280 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002281 fd = mr->ram_block->fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002282
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002283 return fd;
2284}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002285
Avi Kivity093bc2c2011-07-26 14:26:01 +03002286void *memory_region_get_ram_ptr(MemoryRegion *mr)
2287{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002288 void *ptr;
2289 uint64_t offset = 0;
2290
Dr. David Alan Gilbert694ea272019-10-07 15:36:41 +01002291 RCU_READ_LOCK_GUARD();
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002292 while (mr->alias) {
2293 offset += mr->alias_offset;
2294 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002295 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002296 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002297 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002298
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002299 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002300}
2301
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002302MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2303{
2304 RAMBlock *block;
2305
2306 block = qemu_ram_block_from_host(ptr, false, offset);
2307 if (!block) {
2308 return NULL;
2309 }
2310
2311 return block->mr;
2312}
2313
Fam Zheng7ebb2742016-03-01 14:18:20 +08002314ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2315{
2316 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2317}
2318
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002319void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2320{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002321 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002322
Gongleifa53a0e2016-05-10 10:04:59 +08002323 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002324}
2325
Philippe Mathieu-Daudé9ecc9962020-05-08 08:24:54 +02002326void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2327{
2328 if (mr->ram_block) {
Philippe Mathieu-Daudéab7e41e2020-05-08 08:24:56 +02002329 qemu_ram_msync(mr->ram_block, addr, size);
Philippe Mathieu-Daudé9ecc9962020-05-08 08:24:54 +02002330 }
2331}
Beata Michalska61c490e2019-11-21 00:08:41 +00002332
Philippe Mathieu-Daudé4dfe59d2020-05-08 08:24:53 +02002333void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
Beata Michalska61c490e2019-11-21 00:08:41 +00002334{
 2335    /*
 2336     * This might need to be extended to cover
 2337     * other types of memory regions.
 2338     */
Philippe Mathieu-Daudé9ecc9962020-05-08 08:24:54 +02002339 if (mr->dirty_log_mask) {
2340 memory_region_msync(mr, addr, size);
Beata Michalska61c490e2019-11-21 00:08:41 +00002341 }
2342}
2343
Peter Xub960fc12019-08-20 22:13:28 +08002344/*
 2345 * Notify the proper memory listeners about the change to the newly
 2346 * added/removed CoalescedMemoryRange.
2347 */
2348static void memory_region_update_coalesced_range(MemoryRegion *mr,
2349 CoalescedMemoryRange *cmr,
2350 bool add)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002351{
Peter Xub960fc12019-08-20 22:13:28 +08002352 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002353 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002354 FlatRange *fr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002355
Avi Kivity0d673e32012-10-02 15:28:50 +02002356 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Peter Xub960fc12019-08-20 22:13:28 +08002357 view = address_space_get_flatview(as);
2358 FOR_EACH_FLAT_RANGE(fr, view) {
2359 if (fr->mr == mr) {
2360 flat_range_coalesced_io_notify(fr, as, cmr, add);
2361 }
2362 }
2363 flatview_unref(view);
Avi Kivity0d673e32012-10-02 15:28:50 +02002364 }
2365}
2366
Avi Kivity093bc2c2011-07-26 14:26:01 +03002367void memory_region_set_coalescing(MemoryRegion *mr)
2368{
2369 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002370 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002371}
2372
2373void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002374 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002375 uint64_t size)
2376{
Anthony Liguori7267c092011-08-20 22:09:37 -05002377 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002378
Avi Kivity08dafab2011-10-16 13:19:17 +02002379 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002380 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002381 memory_region_update_coalesced_range(mr, cmr, true);
Jan Kiszkad4105152012-08-23 13:02:29 +02002382 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002383}
2384
2385void memory_region_clear_coalescing(MemoryRegion *mr)
2386{
2387 CoalescedMemoryRange *cmr;
Peter Xu9c1aa1c2019-08-20 22:13:27 +08002388
2389 if (QTAILQ_EMPTY(&mr->coalesced)) {
2390 return;
2391 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002392
Jan Kiszkad4105152012-08-23 13:02:29 +02002393 qemu_flush_coalesced_mmio_buffer();
2394 mr->flush_coalesced_mmio = false;
2395
Avi Kivity093bc2c2011-07-26 14:26:01 +03002396 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2397 cmr = QTAILQ_FIRST(&mr->coalesced);
2398 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002399 memory_region_update_coalesced_range(mr, cmr, false);
Anthony Liguori7267c092011-08-20 22:09:37 -05002400 g_free(cmr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002401 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002402}
2403
Jan Kiszkad4105152012-08-23 13:02:29 +02002404void memory_region_set_flush_coalesced(MemoryRegion *mr)
2405{
2406 mr->flush_coalesced_mmio = true;
2407}
2408
2409void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2410{
2411 qemu_flush_coalesced_mmio_buffer();
2412 if (QTAILQ_EMPTY(&mr->coalesced)) {
2413 mr->flush_coalesced_mmio = false;
2414 }
2415}
2416
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002417static bool userspace_eventfd_warning;
2418
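/*
 * mr->ioeventfds is kept sorted (see memory_region_ioeventfd_before),
 * so the add/del helpers below find the insertion or removal point with
 * a linear scan and memmove the tail of the array.
 */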
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002419void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002420 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002421 unsigned size,
2422 bool match_data,
2423 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002424 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002425{
2426 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002427 .addr.start = int128_make64(addr),
2428 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002429 .match_data = match_data,
2430 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002431 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002432 };
2433 unsigned i;
2434
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002435 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2436 userspace_eventfd_warning))) {
2437 userspace_eventfd_warning = true;
2438 error_report("Using eventfd without MMIO binding in KVM. "
2439 "Suboptimal performance expected");
2440 }
2441
Jason Wangb8aecea2015-11-06 16:02:45 +08002442 if (size) {
Tony Nguyen9bf825b2019-08-24 04:36:54 +10002443 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
Jason Wangb8aecea2015-11-06 16:02:45 +08002444 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002445 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002446 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002447 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002448 break;
2449 }
2450 }
2451 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002452 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002453 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2454 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2455 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2456 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002457 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002458 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002459}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    /*
     * Keep at least one byte allocated: with the "+ 1" outside the
     * multiplication, removing the last eventfd does not turn this into
     * g_realloc(ptr, 0), which would free the array.
     */
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
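
/*
 * Illustrative sketch (not part of this file): a device might wire a
 * doorbell eventfd so that guest writes of 1 to offset 0x40 of its MMIO
 * region kick an EventNotifier without a trip through userspace MMIO
 * dispatch.  The "MyDevice" type, its fields and the 0x40 offset are
 * hypothetical.
 *
 *     static void mydev_realize(MyDevice *d, Error **errp)
 *     {
 *         event_notifier_init(&d->doorbell, 0);
 *         memory_region_add_eventfd(&d->mmio, 0x40, 4, true, 1,
 *                                   &d->doorbell);
 *     }
 *
 * The matching teardown calls memory_region_del_eventfd() with the very
 * same (addr, size, match_data, data, e) tuple, since deletion looks the
 * entry up with memory_region_ioeventfd_equal().
 */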

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
2515
Peter Crosthwaite05987012014-06-05 23:14:44 -07002516static void memory_region_add_subregion_common(MemoryRegion *mr,
2517 hwaddr offset,
2518 MemoryRegion *subregion)
2519{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002520 assert(!subregion->container);
2521 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002522 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002523 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002524}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002525
2526void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002527 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002528 MemoryRegion *subregion)
2529{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002530 subregion->priority = 0;
2531 memory_region_add_subregion_common(mr, offset, subregion);
2532}
2533
2534void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002535 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002536 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002537 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002538{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002539 subregion->priority = priority;
2540 memory_region_add_subregion_common(mr, offset, subregion);
2541}
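
/*
 * Illustrative sketch (not part of this file): overlapping subregions are
 * rendered in priority order, higher priority shadowing lower.  A board
 * could map RAM at priority 0 and let a small ROM window win over the
 * start of it; "sysmem", "ram" and "rom" are hypothetical regions set up
 * elsewhere with memory_region_init_ram()/_rom().
 *
 *     memory_region_add_subregion(sysmem, 0x0, ram);
 *     memory_region_add_subregion_overlap(sysmem, 0x0, rom, 1);
 *
 * Guest accesses within "rom" then hit the ROM; RAM shows through
 * wherever the higher-priority region leaves holes.
 */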

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
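
/*
 * Illustrative sketch (not part of this file): nested transactions batch
 * several topology changes into a single flatview rebuild and one round
 * of listener callbacks.  "bank0" and "bank1" are hypothetical regions.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_enabled(bank1, true);
 *     memory_region_transaction_commit();
 *
 * Without the explicit begin/commit pair, each call above would commit
 * its own inner transaction and the topology would be rebuilt twice.
 */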

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}
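
/*
 * Illustrative sketch (not part of this file): memory_region_set_address()
 * is how PCI-BAR-style remapping is expressed.  The region is deleted
 * from and re-added to its container inside one transaction, so the
 * guest never observes an intermediate state.  "MyDevice", "bar_mr" and
 * new_base are hypothetical.
 *
 *     static void mydev_update_bar(MyDevice *d, hwaddr new_base)
 *     {
 *         memory_region_set_address(&d->bar_mr, new_base);
 *     }
 */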

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/*
 * Same as memory_region_find(), but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}
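
/*
 * Illustrative sketch (not part of this file): callers of
 * memory_region_find() own a reference on the returned region and must
 * drop it when done.  Probing what backs a guest address in the system
 * memory tree might look like this (the printf is just for illustration):
 *
 *     MemoryRegionSection sec;
 *
 *     sec = memory_region_find(get_system_memory(), addr, 4);
 *     if (sec.mr) {
 *         printf("%s at offset 0x%" PRIx64 "\n",
 *                memory_region_name(sec.mr),
 *                (uint64_t)sec.offset_within_region);
 *         memory_region_unref(sec.mr);
 *     }
 */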

MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
{
    MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);

    *tmp = *s;
    if (tmp->mr) {
        memory_region_ref(tmp->mr);
    }
    if (tmp->fv) {
        bool ret = flatview_ref(tmp->fv);

        g_assert(ret);
    }
    return tmp;
}

void memory_region_section_free_copy(MemoryRegionSection *s)
{
    if (s->fv) {
        flatview_unref(s->fv);
    }
    if (s->mr) {
        memory_region_unref(s->mr);
    }
    g_free(s);
}
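
/*
 * Illustrative sketch (not part of this file): the copy/free pair lets a
 * section outlive the lookup that produced it, e.g. to stash it in a
 * device structure; "d->stashed" is hypothetical.
 *
 *     MemoryRegionSection sec = memory_region_find(get_system_memory(),
 *                                                  addr, 1);
 *     if (sec.mr) {
 *         d->stashed = memory_region_section_new_copy(&sec);
 *         memory_region_unref(sec.mr);
 *     }
 *
 * Later: memory_region_section_free_copy(d->stashed);
 */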

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, bool running,
                                           RunState state)
{
    if (running) {
        /*
         * The VM has resumed: complete the dirty log stop that was
         * deferred while it was paused, then drop the one-shot handler.
         */
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        /*
         * The VM is paused: defer the actual stop until it next resumes,
         * keeping the teardown cost out of the stopped phase.
         */
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
            memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
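
/*
 * Illustrative sketch (not part of this file): a RAM migration pass uses
 * the global dirty log API roughly like this (heavily simplified; the
 * real code lives in migration/ram.c):
 *
 *     memory_global_dirty_log_start();
 *     while (more_passes_needed()) {        // hypothetical loop condition
 *         memory_global_dirty_log_sync();   // pull bitmaps from listeners
 *         memory_global_after_dirty_log_sync();
 *         send_dirty_pages();               // hypothetical
 *     }
 *     memory_global_dirty_log_stop();       // may be deferred, see above
 */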

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    /* A listener provides either log_sync or log_sync_global, never both. */
    assert(!(listener->log_sync && listener->log_sync_global));

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
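
/*
 * Illustrative sketch (not part of this file): a minimal listener that
 * logs every section of the system address space.  Registration
 * immediately replays the current topology through region_add,
 * bracketed by begin/commit; the printf is just for illustration.
 *
 *     static void dump_region_add(MemoryListener *l,
 *                                 MemoryRegionSection *section)
 *     {
 *         printf("add: %s\n", memory_region_name(section->mr));
 *     }
 *
 *     static MemoryListener dump_listener = {
 *         .region_add = dump_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&dump_listener, &address_space_memory);
 */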

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
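
/*
 * Illustrative sketch (not part of this file): a device that issues DMA
 * through an IOMMU typically wraps its view of memory in a private
 * address space; "s", its fields and the region names are hypothetical.
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "mydev-dma", UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "mydev-dma");
 *
 * The matching teardown is address_space_destroy(&s->dma_as), which
 * defers the actual freeing via call_rcu() as shown below.
 */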

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /*
     * At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
2983
Blue Swirl314e2982011-09-11 20:22:05 +00002984typedef struct MemoryRegionList MemoryRegionList;
2985
2986struct MemoryRegionList {
2987 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002988 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002989};
2990
Paolo Bonzinib58deb32018-12-06 11:58:10 +01002991typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002992
Peter Xu4e831902017-01-16 16:40:04 +08002993#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2994 int128_sub((size), int128_one())) : 0)
2995#define MTREE_INDENT " "
2996
static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region.  This should never
     * happen normally; if it does, tag the line so whoever is reading
     * the output knows the printed range is bogus.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                        "-" TARGET_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_accel());

        if (ac->has_memory) {
            fvi.ac = ac;
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
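
/*
 * Illustrative sketch (not part of this file): typical device-side usage,
 * allocating migratable RAM during realize; "MyDevice", "s" and the names
 * are hypothetical, and errors propagate to the caller.
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDevice *s = MYDEV(dev);
 *
 *         memory_region_init_ram(&s->vram, OBJECT(dev), "mydev.vram",
 *                                16 * MiB, errp);
 *     }
 *
 * Passing OBJECT(dev) as the owner both ties the region's lifetime to the
 * device and gives vmstate_register_ram() a stable migration name.
 */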

void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
3387
Alexander Bulekove7d32222020-10-23 11:07:34 -04003388/*
3389 * Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
3390 * the fuzz_dma_read_cb callback
3391 */
3392#ifdef CONFIG_FUZZ
3393void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
3394 size_t len,
Alexander Bulekovfc1c8342021-01-20 01:02:55 -05003395 MemoryRegion *mr)
Alexander Bulekove7d32222020-10-23 11:07:34 -04003396{
3397}
3398#endif

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static const TypeInfo ram_discard_manager_info = {
    .parent = TYPE_INTERFACE,
    .name = TYPE_RAM_DISCARD_MANAGER,
    .class_size = sizeof(RamDiscardManagerClass),
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)