/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

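/* An address field of -1 keeps TLB_INVALID_MASK set after masking, so
 * it can never compare equal to a page-aligned guest address; assigning
 * this entry is how a TLB slot is invalidated.
 */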
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

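/* Invalidate one TLB entry if it caches any kind of mapping (read,
 * write or code) for the page-aligned address 'addr'.  Keeping
 * TLB_INVALID_MASK in the comparison means an already-invalid entry
 * can never match.
 */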
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

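/* Flush all mappings of the page containing 'addr' from every MMU mode.
 * If the page is covered by the recorded large-page range, do a full
 * flush instead: a large page may be cached under many different
 * TARGET_PAGE_SIZE entries whose indices cannot be enumerated cheaply.
 */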
void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

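/* If the entry is a plain RAM write mapping and its target falls inside
 * the host address range [start, start + length), set TLB_NOTDIRTY so
 * that the next write through it takes the slow path and updates the
 * dirty bitmap.
 */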
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

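/* Re-check one entry against the global dirty bitmap and force its
 * writes back through the slow path if the page is no longer dirty.
 */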
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if (tlb_is_dirty_ram(tlb_entry)) {
        p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

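/* Drop TLB_NOTDIRTY from an entry that maps 'vaddr' exactly, restoring
 * direct writes to RAM once the page has been marked dirty.
 */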
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
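    /* Widen the mask one bit at a time until the recorded region and
     * the new page fall inside the same aligned block.  For example
     * (illustrative values), merging 0x100000/0xfff00000 with a page
     * at 0x240000 widens the mask to 0xffc00000 and the base to 0.
     */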
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry.  At most one entry for a given virtual address
   is permitted.  Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    section = phys_page_find(address_space_memory.dispatch,
                             paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (memory_region_is_ram(section->mr) ||
        memory_region_is_romd(section->mr)) {
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr)
            + memory_region_section_addr(section, paddr);
    } else {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
        addend = 0;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, prot,
                                            &address);

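    /* The TLB is direct-mapped: the index is the low bits of the
     * virtual page number.  The iotlb array stores iotlb - vaddr, so
     * adding the faulting virtual address back in the I/O path
     * recovers the section-relative address.
     */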
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && !cpu_physical_memory_is_dirty(
                           section->mr->ram_addr
                           + memory_region_section_addr(section, paddr))) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}


/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
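        /* No valid code mapping for this page yet: a dummy byte load
         * fills the TLB through the slow path (this is where the
         * exception mentioned above can be raised).
         */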
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (memory_region_is_unassigned(mr)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

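/* Generate the code-access loaders (ld{b,w,l,q}_cmmu): each inclusion
 * of softmmu_template.h with a different SHIFT emits the helper for a
 * 2^SHIFT-byte access.  GETPC is stubbed to 0 because there is no
 * translated-code return address to unwind for code fetches.
 */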
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env