/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
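
/* For example, including this template with SHIFT == 2 resolves the
   definitions above to DATA_SIZE == 4, SUFFIX == l, LSUFFIX == l,
   SDATA_TYPE == int32_t and DATA_TYPE == uint32_t, so the generated
   helpers operate on 32-bit quantities.  */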

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_ulong, except in
   the case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
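
/* E.g. for a 16-bit load on a 64-bit host, WORD_TYPE is tcg_target_ulong
   (64 bits wide); USUFFIX == uw names the zero-extending helper and
   SSUFFIX == sw the sign-extending one.  */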

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
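
/* TGT_BE/TGT_LE convert between the target's native byte order and
   big-/little-endian order.  E.g. on a big-endian target a big-endian
   value is already in native order, so TGT_BE is the identity and
   TGT_LE must byte-swap.  */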

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
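
/* MMUSUFFIX is supplied by the including file (typically _mmu for data
   access and _cmmu for code access).  With SHIFT == 2 and MMUSUFFIX ==
   _mmu, helper_le_ld_name expands to helper_le_ldul_mmu.  Byte accesses
   have no endianness, so for DATA_SIZE == 1 the le/be names collapse
   into a single helper_ret_* entry point.  */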

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }
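    /* (With a_bits == 2 the check above demands 4-byte alignment: any
       address with either of the low two bits set faults before the
       access is attempted.)  */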

    /* If the TLB entry is for a different page, reload and try again.  */
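    /* VICTIM_TLB_HIT is provided by the file that includes this template;
       it first probes a small victim cache of recently evicted TLB
       entries, so a full tlb_fill() is only paid on a miss there.  */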
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
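        /* E.g. a 4-byte load at an address with addr % 4 == 1 gives
           shift == 8: the low three bytes of the result come from
           res1 >> 8 and the top byte from res2 << 24.  */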
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
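        /* E.g. a 4-byte load at an address with addr % 4 == 1 gives
           shift == 8: the high three bytes of the result come from
           res1 << 8 and the low byte from res2 >> 24.  */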
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
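/* E.g. for a 16-bit load on a 64-bit host: the unsigned result is cast
   to int16_t and then implicitly widened back to WORD_TYPE, which
   sign-extends it to the full register width.  */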
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
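            /* Byte i of the little-endian representation; i == 0 is the
               least-significant byte and is stored at the lowest
               address.  */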
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
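            /* Byte i of the big-endian representation; i == 0 is the
               most-significant byte and is stored at the lowest
               address.  */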
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
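/* Typical (illustrative) use: a target helper that is about to perform
   a multi-byte store calls probe_write() first, so that any write fault
   is raised before the helper has modified any guest state.  */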
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name