blob: c9878ba4743f7ee8ece0de2aba2446eda24f1464 [file] [log] [blame]
Paul Brook1ad21342009-05-19 16:17:58 +01001#ifndef CPU_COMMON_H
2#define CPU_COMMON_H 1
3
/* CPU interfaces that are target independent. */
5
Paolo Bonzini37b76cf2010-04-01 19:57:10 +02006#ifdef TARGET_PHYS_ADDR_BITS
7#include "targphys.h"
8#endif
9
10#ifndef NEED_CPU_H
11#include "poison.h"
12#endif
13
Paul Brook1ad21342009-05-19 16:17:58 +010014#include "bswap.h"
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +020015#include "qemu-queue.h"
Paul Brook1ad21342009-05-19 16:17:58 +010016
Paul Brookb3755a92010-03-12 16:54:58 +000017#if !defined(CONFIG_USER_ONLY)
18
/*
 * Byte order of a memory-mapped device region, as passed to
 * cpu_register_io_memory().  NATIVE means "same endianness as the
 * target CPU", so accesses need no swapping either way.
 */
enum device_endian {
    DEVICE_NATIVE_ENDIAN,   /* follows the target CPU's endianness */
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
24
Paul Brook1ad21342009-05-19 16:17:58 +010025/* address in the RAM (different from a physical address) */
Anthony PERARDf15fbc42011-07-20 08:17:42 +000026#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
27typedef uint64_t ram_addr_t;
28# define RAM_ADDR_MAX UINT64_MAX
29# define RAM_ADDR_FMT "%" PRIx64
30#else
Anthony Liguoric227f092009-10-01 16:12:16 -050031typedef unsigned long ram_addr_t;
Anthony PERARDf15fbc42011-07-20 08:17:42 +000032# define RAM_ADDR_MAX ULONG_MAX
33# define RAM_ADDR_FMT "%lx"
34#endif
Paul Brook1ad21342009-05-19 16:17:58 +010035
36/* memory API */
37
Anthony Liguoric227f092009-10-01 16:12:16 -050038typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
39typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
Paul Brook1ad21342009-05-19 16:17:58 +010040
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +030041void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
42 ram_addr_t size,
43 ram_addr_t phys_offset,
44 ram_addr_t region_offset,
45 bool log_dirty);
46
47static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
48 ram_addr_t size,
49 ram_addr_t phys_offset,
50 ram_addr_t region_offset)
51{
52 cpu_register_physical_memory_log(start_addr, size, phys_offset,
53 region_offset, false);
54}
55
Anthony Liguoric227f092009-10-01 16:12:16 -050056static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
57 ram_addr_t size,
58 ram_addr_t phys_offset)
Paul Brook1ad21342009-05-19 16:17:58 +010059{
60 cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
61}
62
Anthony Liguoric227f092009-10-01 16:12:16 -050063ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
Cam Macdonell84b89d72010-07-26 18:10:57 -060064ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
65 ram_addr_t size, void *host);
Alex Williamson1724f042010-06-25 11:09:35 -060066ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
Anthony Liguoric227f092009-10-01 16:12:16 -050067void qemu_ram_free(ram_addr_t addr);
Alex Williamson1f2e98b2011-05-03 12:48:09 -060068void qemu_ram_free_from_ptr(ram_addr_t addr);
Huang Yingcd19cfa2011-03-02 08:56:19 +010069void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
Paul Brook1ad21342009-05-19 16:17:58 +010070/* This should only be used for ram local to a device. */
Anthony Liguoric227f092009-10-01 16:12:16 -050071void *qemu_get_ram_ptr(ram_addr_t addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +010072void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +020073/* Same but slower, to use for migration, where the order of
74 * RAMBlocks must not change. */
75void *qemu_safe_ram_ptr(ram_addr_t addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +010076void qemu_put_ram_ptr(void *addr);
Paul Brook1ad21342009-05-19 16:17:58 +010077/* This should not be used by devices. */
Marcelo Tosattie8902612010-10-11 15:31:19 -030078int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
79ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
Paul Brook1ad21342009-05-19 16:17:58 +010080
Blue Swirld60efc62009-08-25 18:29:31 +000081int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
82 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +010083 void *opaque, enum device_endian endian);
Paul Brook1ad21342009-05-19 16:17:58 +010084void cpu_unregister_io_memory(int table_address);
85
Anthony Liguoric227f092009-10-01 16:12:16 -050086void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
Paul Brook1ad21342009-05-19 16:17:58 +010087 int len, int is_write);
Anthony Liguoric227f092009-10-01 16:12:16 -050088static inline void cpu_physical_memory_read(target_phys_addr_t addr,
Stefan Weil3bad9812011-04-10 17:28:56 +020089 void *buf, int len)
Paul Brook1ad21342009-05-19 16:17:58 +010090{
91 cpu_physical_memory_rw(addr, buf, len, 0);
92}
Anthony Liguoric227f092009-10-01 16:12:16 -050093static inline void cpu_physical_memory_write(target_phys_addr_t addr,
Stefan Weil3bad9812011-04-10 17:28:56 +020094 const void *buf, int len)
Paul Brook1ad21342009-05-19 16:17:58 +010095{
Stefan Weil3bad9812011-04-10 17:28:56 +020096 cpu_physical_memory_rw(addr, (void *)buf, len, 1);
Paul Brook1ad21342009-05-19 16:17:58 +010097}
Anthony Liguoric227f092009-10-01 16:12:16 -050098void *cpu_physical_memory_map(target_phys_addr_t addr,
99 target_phys_addr_t *plen,
Paul Brook1ad21342009-05-19 16:17:58 +0100100 int is_write);
Anthony Liguoric227f092009-10-01 16:12:16 -0500101void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
102 int is_write, target_phys_addr_t access_len);
Paul Brook1ad21342009-05-19 16:17:58 +0100103void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
104void cpu_unregister_map_client(void *cookie);
105
/*
 * Callback client notified of changes to the guest physical memory map
 * and of dirty-logging state.  Registered/unregistered via
 * cpu_register_phys_memory_client()/cpu_unregister_phys_memory_client().
 * Callback semantics beyond the signatures are defined by the
 * implementations elsewhere in the tree — NOTE(review): confirm return
 * conventions (0/-errno presumed) against the callers.
 */
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    /* A region [start_addr, start_addr+size) was (re)mapped to
     * phys_offset; log_dirty mirrors the registration's dirty flag. */
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    /* Synchronize the dirty bitmap for the given address range. */
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    /* Enable/disable migration-time dirty logging globally. */
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    /* Start/stop dirty logging on one region. */
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    /* Linkage on the global client list (qemu-queue.h). */
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};
125
126void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
127void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
128
Blue Swirl6842a082010-03-21 19:47:13 +0000129/* Coalesced MMIO regions are areas where write operations can be reordered.
130 * This usually implies that write operations are side-effect free. This allows
131 * batching which can make a major impact on performance when using
132 * virtualization.
133 */
134void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
135
136void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
137
138void qemu_flush_coalesced_mmio_buffer(void);
139
Anthony Liguoric227f092009-10-01 16:12:16 -0500140uint32_t ldub_phys(target_phys_addr_t addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +0200141uint32_t lduw_le_phys(target_phys_addr_t addr);
142uint32_t lduw_be_phys(target_phys_addr_t addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +0200143uint32_t ldl_le_phys(target_phys_addr_t addr);
144uint32_t ldl_be_phys(target_phys_addr_t addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +0200145uint64_t ldq_le_phys(target_phys_addr_t addr);
146uint64_t ldq_be_phys(target_phys_addr_t addr);
Anthony Liguoric227f092009-10-01 16:12:16 -0500147void stb_phys(target_phys_addr_t addr, uint32_t val);
Alexander Graf1e78bcc2011-07-06 09:09:23 +0200148void stw_le_phys(target_phys_addr_t addr, uint32_t val);
149void stw_be_phys(target_phys_addr_t addr, uint32_t val);
Alexander Graf1e78bcc2011-07-06 09:09:23 +0200150void stl_le_phys(target_phys_addr_t addr, uint32_t val);
151void stl_be_phys(target_phys_addr_t addr, uint32_t val);
Alexander Graf1e78bcc2011-07-06 09:09:23 +0200152void stq_le_phys(target_phys_addr_t addr, uint64_t val);
153void stq_be_phys(target_phys_addr_t addr, uint64_t val);
Paul Brook1ad21342009-05-19 16:17:58 +0100154
Blue Swirl21673cd2011-07-14 15:22:20 +0000155#ifdef NEED_CPU_H
156uint32_t lduw_phys(target_phys_addr_t addr);
157uint32_t ldl_phys(target_phys_addr_t addr);
158uint64_t ldq_phys(target_phys_addr_t addr);
159void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
160void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
161void stw_phys(target_phys_addr_t addr, uint32_t val);
162void stl_phys(target_phys_addr_t addr, uint32_t val);
163void stq_phys(target_phys_addr_t addr, uint64_t val);
164#endif
165
Anthony Liguoric227f092009-10-01 16:12:16 -0500166void cpu_physical_memory_write_rom(target_phys_addr_t addr,
Paul Brook1ad21342009-05-19 16:17:58 +0100167 const uint8_t *buf, int len);
168
169#define IO_MEM_SHIFT 3
170
171#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
172#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
173#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
174#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
175
176/* Acts like a ROM when read and like a device when written. */
177#define IO_MEM_ROMD (1)
178#define IO_MEM_SUBPAGE (2)
Paul Brook1ad21342009-05-19 16:17:58 +0100179
Paul Brookb3755a92010-03-12 16:54:58 +0000180#endif
181
Paul Brook1ad21342009-05-19 16:17:58 +0100182#endif /* !CPU_COMMON_H */