/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
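
/* Usage sketch (illustrative, not taken from this header): the including
 * file defines DATA_SIZE, and typically MMUSUFFIX, before each inclusion,
 * e.g.
 *
 *     #define MMUSUFFIX _mmu
 *     #define DATA_SIZE 1
 *     #include "softmmu_template.h"
 *     #define DATA_SIZE 2
 *     #include "softmmu_template.h"
 *
 * so one set of load/store helpers is generated per access size. */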

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host. This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
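
/* For example, with DATA_SIZE == 2 on a 64-bit host (and no
 * SOFTMMU_CODE_ACCESS), WORD_TYPE is tcg_target_ulong and USUFFIX/SSUFFIX
 * expand to "uw"/"sw", so the slow-path load helpers hand back a 16-bit
 * value zero- or sign-extended to the full host register. */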

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
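
/* Note: the TGT_LE()/TGT_BE() macros used by the helpers below are not
 * defined in this excerpt; they are assumed to be provided before this
 * point is reached (presumably in terms of BSWAP(), swapping only when
 * guest and host byte order differ). */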

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
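
/* For example, with DATA_SIZE == 4 and MMUSUFFIX defined as _mmu by the
 * includer (an assumption made here for illustration), helper_le_ld_name
 * expands to helper_le_ldul_mmu and helper_le_lds_name to
 * helper_le_ldsl_mmu; for DATA_SIZE == 1 the LE and BE names collapse to
 * the single byte helper, e.g. helper_ret_ldub_mmu. */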

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
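    /* (Folding TLB_INVALID_MASK into the right-hand mask means an entry
       marked invalid can never compare equal, so it always forces a
       refill.) */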
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine. */
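        /* E.g. a 4-byte load with (addr & 3) == 2 gives shift == 16: the
           low half of the result comes from the top of res1 (the bytes at
           addr and addr + 1) and the high half from the bottom of res2
           (the bytes at addr + 2 and addr + 3). */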
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine. */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well. We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
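/* For instance, with DATA_SIZE == 2 (and MMUSUFFIX assumed to be _mmu for
   illustration), helper_le_lds_name is helper_le_ldsw_mmu: it just casts
   the unsigned result through int16_t so the value returns sign-extended
   in the full host register. */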
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB. Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first. */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple. */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit. */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract. */
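            /* Byte i of the little-endian representation is
               (val >> (i * 8)) & 0xff, so the bytes are stored in
               increasing address order: val, val >> 8, ...,
               val >> ((DATA_SIZE - 1) * 8). */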
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB. Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first. */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code. */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name