/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

/* all CPU memory accesses use these inline accessors */
static inline int ldub(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
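
/* Illustrative example: these accessors move bytes through a host pointer,
   with ldub() zero-extending and ldsb() sign-extending the stored byte;
   e.g. after stb(buf, 0xff), ldub(buf) yields 255 and ldsb(buf) yields -1. */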

/* NOTE: on arm, writing 2 to /proc/sys/debug/alignment makes the kernel
   fix up unaligned loads/stores, which may give better results, but it
   is a system-wide setting, so we do not rely on it here */
#if defined(WORDS_BIGENDIAN) || defined(__arm__)

/* byte-wise accessors for little-endian data, usable on big-endian and
   alignment-restricted hosts */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl(p);
    v2 = ldl(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl(p, (uint32_t)v);
    stl(p + 4, v >> 32);
}
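
/* Illustrative example: on this branch the integer accessors assemble the
   value byte by byte, so little-endian data is read correctly regardless of
   host byte order or alignment; e.g. for the bytes 78 56 34 12 at ptr,
   ldl(ptr) yields 0x12345678 on any host. */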

/* float access */

static inline float ldfl(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl(ptr);
    return u.f;
}

static inline void stfl(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl(ptr, u.i);
}
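
/* Illustrative example: the float accessors move only the bit pattern
   through the integer accessors above, so no host float load/store is done
   on a possibly unaligned pointer; e.g. stfl(buf, 1.0f) stores the IEEE-754
   pattern 0x3f800000 in little-endian byte order and ldfl(buf) returns 1.0f. */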

#if defined(__arm__) && !defined(WORDS_BIGENDIAN)

/* NOTE: on arm, the two 32-bit words of a double are stored in the opposite
   (big-endian) word order, so they must be swapped here */
static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint32_t tab[2];
    } u;
    u.tab[1] = ldl(ptr);
    u.tab[0] = ldl(ptr + 4);
    return u.d;
}

static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint32_t tab[2];
    } u;
    u.d = v;
    stl(ptr, u.tab[1]);
    stl(ptr + 4, u.tab[0]);
}
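
/* Illustrative note (assuming the arm FPA double layout): u.tab[0] holds the
   high (sign/exponent) word, so for 1.0 (IEEE-754 pattern 0x3ff0000000000000)
   stfq() writes 0x00000000 at ptr and 0x3ff00000 at ptr + 4, i.e. the plain
   little-endian layout the target expects, and ldfq() undoes the swap. */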

#else
static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.i = ldq(ptr);
    return u.d;
}

static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.d = v;
    stq(ptr, u.i);
}
#endif

#else

static inline int lduw(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float ldfl(void *ptr)
{
    return *(float *)ptr;
}

static inline double ldfq(void *ptr)
{
    return *(double *)ptr;
}

static inline void stfl(void *ptr, float v)
{
    *(float *)ptr = v;
}

static inline void stfq(void *ptr, double v)
{
    *(double *)ptr = v;
}
#endif

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
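
/* Illustrative example: with TARGET_PAGE_BITS == 12 (4 KiB pages; the actual
   value is target dependent), TARGET_PAGE_SIZE is 0x1000 and TARGET_PAGE_MASK
   is 0xfffff000 on a 32-bit host, so (0x12345 & TARGET_PAGE_MASK) == 0x12000
   and TARGET_PAGE_ALIGN(0x12345) == 0x13000. */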

extern unsigned long real_host_page_size;
extern unsigned long host_page_bits;
extern unsigned long host_page_size;
extern unsigned long host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + host_page_size - 1) & host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

void page_dump(FILE *f);
int page_get_flags(unsigned long address);
void page_set_flags(unsigned long start, unsigned long end, int flags);
void page_unprotect_range(uint8_t *data, unsigned long data_size);
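
/* Usage sketch (illustrative only; `start', `len' and `addr' stand for
   caller-supplied values): record the protection of a freshly mapped guest
   range and query it later:

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
       writable = page_get_flags(addr) & PAGE_WRITE;
*/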

#define SINGLE_CPU_DEFINES
#ifdef SINGLE_CPU_DEFINES

#if defined(TARGET_I386)

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_interrupt cpu_x86_interrupt
#define cpu_signal_handler cpu_x86_signal_handler

#elif defined(TARGET_ARM)

#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_interrupt cpu_arm_interrupt
#define cpu_signal_handler cpu_arm_signal_handler

#else

#error unsupported target CPU

#endif

#endif /* SINGLE_CPU_DEFINES */

#define DEFAULT_GDBSTUB_PORT 1234

void cpu_abort(CPUState *env, const char *fmt, ...);
extern CPUState *cpu_single_env;

#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
void cpu_interrupt(CPUState *s, int mask);

int cpu_breakpoint_insert(CPUState *env, uint32_t pc);
int cpu_breakpoint_remove(CPUState *env, uint32_t pc);
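
/* Usage sketch (illustrative only; `pc' stands for a caller-chosen guest
   address): a device model raises a hardware interrupt and a debugger plants
   a breakpoint; the breakpoint calls return a status that should be checked:

       cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
       cpu_breakpoint_insert(cpu_single_env, pc);
*/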

/* gdb stub API */
extern int gdbstub_fd;
CPUState *cpu_gdbstub_get_env(void *opaque);
int cpu_gdbstub(void *opaque, int (*main_loop)(void *opaque), int port);
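
/* Usage sketch (illustrative only): hand control to the gdb stub, which
   waits for a debugger connection on the given port and then drives the
   caller-supplied main loop callback, passing it the opaque pointer:

       cpu_gdbstub(NULL, main_loop, DEFAULT_GDBSTUB_PORT);
*/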

#endif /* CPU_ALL_H */