#ifndef CPU_COMMON_H
#define CPU_COMMON_H

/* CPU interfaces that are target independent. */

#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
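
/*
 * Usage sketch (illustrative, not part of the API): vaddr is fixed at
 * 64 bits, so values are printed with the VADDR_* format macros rather
 * than a hard-coded conversion specifier.
 *
 *     vaddr pc = 0x401000;
 *     qemu_log("pc=0x%" VADDR_PRIx "\n", pc);
 */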

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
 * when intptr_t is 32-bit and we are aligning a long long.
 */
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
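
/*
 * Usage sketch (illustrative): round a length up to a host-page multiple
 * before mapping it; ROUND_UP is provided by the osdep headers, and
 * mapping_size is a hypothetical variable.
 *
 *     size_t aligned = HOST_PAGE_ALIGN(mapping_size);
 */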

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);

void tcg_flush_softmmu_tlb(CPUState *cs);

void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif
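
/*
 * Usage sketch (illustrative): a device model picks the byte order of its
 * MMIO registers through MemoryRegionOps.endianness; DEVICE_NATIVE_ENDIAN
 * follows the target, the explicit variants fix the order regardless of
 * target. mydev_read and mydev_write are hypothetical callbacks.
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *     };
 */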

/* address in RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
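
/*
 * Usage sketch (illustrative): ram_addr_t changes width depending on
 * CONFIG_XEN_BACKEND, so it is printed with RAM_ADDR_FMT rather than a
 * fixed conversion specifier.
 *
 *     ram_addr_t off = qemu_ram_get_offset(rb);
 *     error_report("block offset: " RAM_ADDR_FMT, off);
 */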

/* memory API */

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_noreserve(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);

/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
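
/*
 * Usage sketch (illustrative): a target with a secure and a non-secure
 * memory view registers two address spaces; sysmem and secure_mr are
 * hypothetical root MemoryRegions owned by the board or CPU code.
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, 0, "cpu-memory", sysmem);
 *     cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
 */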

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
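
/*
 * Usage sketch (illustrative): copying a 32-bit value to and from guest
 * physical memory via the convenience wrappers; GUEST_PADDR is a
 * hypothetical address.
 *
 *     uint32_t val = cpu_to_le32(0x12345678);
 *     cpu_physical_memory_write(GUEST_PADDR, &val, sizeof(val));
 *     cpu_physical_memory_read(GUEST_PADDR, &val, sizeof(val));
 */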
void cpu_reloading_memory_map(void);
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);
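
/*
 * Usage sketch (illustrative): map/unmap gives zero-copy access when the
 * range is contiguous RAM; *plen may come back shorter than requested,
 * and MMIO regions may be served through a bounce buffer. When mapping
 * fails because the bounce buffer is in use, a QEMUBH registered with
 * cpu_register_map_client() is invoked once it becomes free.
 *
 *     hwaddr len = size;
 *     void *p = cpu_physical_memory_map(paddr, &len, true);
 *     if (p) {
 *         memcpy(p, data, len);
 *         cpu_physical_memory_unmap(p, len, true, len);
 *     }
 */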

bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This
 * allows batching, which can have a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);
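
/*
 * Usage sketch (illustrative, assuming the memory_region_set_coalescing()
 * helper from exec/memory.h): a device whose register writes are
 * side-effect free opts in when creating its MMIO region; buffered writes
 * are later replayed by qemu_flush_coalesced_mmio_buffer().
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_set_coalescing(&s->mmio);
 */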

void cpu_flush_icache_range(hwaddr start, hwaddr len);

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
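
/*
 * Usage sketch (illustrative): walk every RAMBlock with a callback; a
 * non-zero return from the callback stops the walk and is propagated to
 * the caller.
 *
 *     static int count_block(RAMBlock *rb, void *opaque)
 *     {
 *         (*(size_t *)opaque)++;
 *         return 0;
 *     }
 *
 *     size_t n = 0;
 *     qemu_ram_foreach_block(count_block, &n);
 */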

#endif

/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write);
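
/*
 * Usage sketch (illustrative): the debug accessor used by the gdbstub
 * walks the CPU's page tables, so it takes a guest virtual address
 * rather than a physical one; is_write=false selects a read.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), false) < 0) {
 *         return -1;
 *     }
 */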

/* vl.c */
extern int singlestep;

void list_cpus(const char *optarg);

#endif /* CPU_COMMON_H */