/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

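/*
 * This file is a template.  The file that #includes it is expected to
 * define SUFFIX, ARG1/ARG1_DECL, TRANSLATE, RCU_READ_LOCK and
 * RCU_READ_UNLOCK (all of which are #undef'ed again at the end of this
 * file), instantiating one family of load/store helpers per inclusion.
 */
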
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

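/*
 * Illustrative use (a sketch, not part of this template; it assumes the
 * plain address_space_* instantiation and the global address_space_memory):
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... the transaction failed and v is not meaningful ...
 *     }
 */
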
/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

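/*
 * Note that the byte load above needs no _internal helper: a one-byte
 * access cannot be misaligned and has no byte order, so there are no
 * endian-specific variants.
 */
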
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside it is not invalidated.  This is useful when the
   dirty bits are used to track modified PTEs. */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

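        /*
         * Mask out the code-dirty bit so that this store does not make
         * the page look dirty to the translated-code machinery; only
         * the remaining dirty-memory clients (e.g. VGA, migration) are
         * notified.
         */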
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
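        /* flush translated code for the stored range and set dirty bits */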
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

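/*
 * Illustrative use (a sketch, matching the load example earlier in this
 * file):
 *
 *     MemTxResult res;
 *     address_space_stl_be(&address_space_memory, 0x1000, 0xdeadbeef,
 *                          MEMTXATTRS_UNSPECIFIED, &res);
 */
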
void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK
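
/*
 * Illustrative instantiation (a sketch only; the real macro definitions
 * live in the file that includes this template):
 *
 *     #define ARG1_DECL            AddressSpace *as
 *     #define ARG1                 as
 *     #define SUFFIX
 *     #define TRANSLATE(...)       address_space_translate(as, __VA_ARGS__)
 *     #define RCU_READ_LOCK(...)   rcu_read_lock()
 *     #define RCU_READ_UNLOCK(...) rcu_read_unlock()
 *     #include "memory_ldst.inc.c"     <-- i.e. this template
 */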