bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 1 | /* |
| 2 | * SH4 emulation |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 3 | * |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 4 | * Copyright (c) 2005 Samuel Tardieu |
| 5 | * |
| 6 | * This library is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU Lesser General Public |
| 8 | * License as published by the Free Software Foundation; either |
Thomas Huth | 6faf2b6 | 2019-02-13 14:52:50 +0100 | [diff] [blame] | 9 | * version 2.1 of the License, or (at your option) any later version. |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 10 | * |
| 11 | * This library is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | * Lesser General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU Lesser General Public |
Blue Swirl | 8167ee8 | 2009-07-16 20:47:01 +0000 | [diff] [blame] | 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 18 | */ |
Markus Armbruster | 54d3123 | 2019-08-12 07:23:59 +0200 | [diff] [blame] | 19 | |
Peter Maydell | 9d4c994 | 2016-01-26 18:17:20 +0000 | [diff] [blame] | 20 | #include "qemu/osdep.h" |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 21 | |
| 22 | #include "cpu.h" |
Paolo Bonzini | 63c9155 | 2016-03-15 13:18:37 +0100 | [diff] [blame] | 23 | #include "exec/exec-all.h" |
Paolo Bonzini | 508127e | 2016-01-07 16:55:28 +0300 | [diff] [blame] | 24 | #include "exec/log.h" |
Benoît Canet | b279e5e | 2011-11-17 14:23:01 +0100 | [diff] [blame] | 25 | |
| 26 | #if !defined(CONFIG_USER_ONLY) |
Paolo Bonzini | 0d09e41 | 2013-02-05 17:06:20 +0100 | [diff] [blame] | 27 | #include "hw/sh4/sh_intc.h" |
Markus Armbruster | 54d3123 | 2019-08-12 07:23:59 +0200 | [diff] [blame] | 28 | #include "sysemu/runstate.h" |
Benoît Canet | b279e5e | 2011-11-17 14:23:01 +0100 | [diff] [blame] | 29 | #endif |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 30 | |
| 31 | #define MMU_OK 0 |
| 32 | #define MMU_ITLB_MISS (-1) |
| 33 | #define MMU_ITLB_MULTIPLE (-2) |
| 34 | #define MMU_ITLB_VIOLATION (-3) |
| 35 | #define MMU_DTLB_MISS_READ (-4) |
| 36 | #define MMU_DTLB_MISS_WRITE (-5) |
| 37 | #define MMU_DTLB_INITIAL_WRITE (-6) |
| 38 | #define MMU_DTLB_VIOLATION_READ (-7) |
| 39 | #define MMU_DTLB_VIOLATION_WRITE (-8) |
| 40 | #define MMU_DTLB_MULTIPLE (-9) |
| 41 | #define MMU_DTLB_MISS (-10) |
aurel32 | cf7055b | 2008-11-21 22:33:15 +0000 | [diff] [blame] | 42 | #define MMU_IADDR_ERROR (-11) |
| 43 | #define MMU_DADDR_ERROR_READ (-12) |
| 44 | #define MMU_DADDR_ERROR_WRITE (-13) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 45 | |
Richard Henderson | f98bce2 | 2019-04-02 22:18:39 +0700 | [diff] [blame] | 46 | #if defined(CONFIG_USER_ONLY) |
| 47 | |
/*
 * User-mode emulation has no interrupt controller and no privileged
 * state: exceptions are surfaced to the host as signals instead of
 * being delivered to a guest vector.  Simply discard any pending
 * exception so the main loop resumes translation.
 */
void superh_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
| 52 | |
| 53 | int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr) |
| 54 | { |
| 55 | /* For user mode, only U0 area is cacheable. */ |
| 56 | return !(addr & 0x80000000); |
| 57 | } |
| 58 | |
| 59 | #else /* !CONFIG_USER_ONLY */ |
| 60 | |
/*
 * Deliver a pending exception or external interrupt to the CPU.
 * Exceptions take priority over interrupts.  On delivery, the current
 * SR/PC/R15 are saved into SSR/SPC/SGR, the CPU enters privileged
 * mode with interrupts blocked, and PC is loaded with the vector
 * derived from VBR (or the reset vector for CPU/manual reset).
 */
void superh_cpu_do_interrupt(CPUState *cs)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
    int do_exp, irq_vector = cs->exception_index;

    /* prioritize exceptions over interrupts */

    do_exp = cs->exception_index != -1;
    do_irq = do_irq && (cs->exception_index == -1);

    /* SR.BL set: exceptions and interrupts are blocked.
       0x1e0 (user break / debug) is still accepted below. */
    if (env->sr & (1u << SR_BL)) {
        if (do_exp && cs->exception_index != 0x1e0) {
            /* In theory a masked exception generates a reset exception,
               which in turn jumps to the reset vector. However this only
               works when using a bootloader. When using a kernel and an
               initrd, they need to be reloaded and the program counter
               should be loaded with the kernel entry point.
               qemu_system_reset_request takes care of that. */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }
        if (do_irq && !env->in_sleep) {
            return; /* masked */
        }
    }
    /* An accepted event always terminates sleep state. */
    env->in_sleep = 0;

    if (do_irq) {
        /* Ask the interrupt controller for the highest-priority pending
           vector above the current IMASK level (SR bits 7:4). */
        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
                                                (env->sr >> 4) & 0xf);
        if (irq_vector == -1) {
            return; /* masked */
        }
    }

    /* Debug tracing only: name the event for the CPU_LOG_INT log. */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *expname;
        switch (cs->exception_index) {
        case 0x0e0:
            expname = "addr_error";
            break;
        case 0x040:
            expname = "tlb_miss";
            break;
        case 0x0a0:
            expname = "tlb_violation";
            break;
        case 0x180:
            expname = "illegal_instruction";
            break;
        case 0x1a0:
            expname = "slot_illegal_instruction";
            break;
        case 0x800:
            expname = "fpu_disable";
            break;
        case 0x820:
            expname = "slot_fpu";
            break;
        case 0x100:
            expname = "data_write";
            break;
        case 0x060:
            expname = "dtlb_miss_write";
            break;
        case 0x0c0:
            expname = "dtlb_violation_write";
            break;
        case 0x120:
            expname = "fpu_exception";
            break;
        case 0x080:
            expname = "initial_page_write";
            break;
        case 0x160:
            expname = "trapa";
            break;
        default:
            expname = do_irq ? "interrupt" : "???";
            break;
        }
        qemu_log("exception 0x%03x [%s] raised\n",
                 irq_vector, expname);
        log_cpu_state(cs, 0);
    }

    /* Save current context and switch to the handler context:
       privileged mode, register bank 1, further events blocked. */
    env->ssr = cpu_read_sr(env);
    env->spc = env->pc;
    env->sgr = env->gregs[15];
    env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
    env->lock_addr = -1;

    if (env->flags & DELAY_SLOT_MASK) {
        /* Branch instruction should be executed again before delay slot. */
        env->spc -= 2;
        /* Clear flags for exception/interrupt routine. */
        env->flags &= ~DELAY_SLOT_MASK;
    }

    if (do_exp) {
        env->expevt = cs->exception_index;
        switch (cs->exception_index) {
        case 0x000:                     /* power-on reset */
        case 0x020:                     /* manual reset */
        case 0x140:                     /* instruction TLB multiple hit */
            /* Reset-class events vector to the fixed reset address,
               with FPU enabled and all interrupt levels masked. */
            env->sr &= ~(1u << SR_FD);
            env->sr |= 0xf << 4; /* IMASK */
            env->pc = 0xa0000000;
            break;
        case 0x040:                     /* ITLB miss */
        case 0x060:                     /* DTLB miss (write) */
            env->pc = env->vbr + 0x400;
            break;
        case 0x160:
            env->spc += 2; /* special case for TRAPA */
            /* fall through */
        default:
            /* General exception vector. */
            env->pc = env->vbr + 0x100;
            break;
        }
        return;
    }

    if (do_irq) {
        env->intevt = irq_vector;
        env->pc = env->vbr + 0x600;
        return;
    }
}
| 192 | |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 193 | static void update_itlb_use(CPUSH4State * env, int itlbnb) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 194 | { |
| 195 | uint8_t or_mask = 0, and_mask = (uint8_t) - 1; |
| 196 | |
| 197 | switch (itlbnb) { |
| 198 | case 0: |
aurel32 | ea2b542 | 2008-05-09 18:45:55 +0000 | [diff] [blame] | 199 | and_mask = 0x1f; |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 200 | break; |
| 201 | case 1: |
| 202 | and_mask = 0xe7; |
| 203 | or_mask = 0x80; |
| 204 | break; |
| 205 | case 2: |
| 206 | and_mask = 0xfb; |
| 207 | or_mask = 0x50; |
| 208 | break; |
| 209 | case 3: |
| 210 | or_mask = 0x2c; |
| 211 | break; |
| 212 | } |
| 213 | |
aurel32 | ea2b542 | 2008-05-09 18:45:55 +0000 | [diff] [blame] | 214 | env->mmucr &= (and_mask << 24) | 0x00ffffff; |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 215 | env->mmucr |= (or_mask << 24); |
| 216 | } |
| 217 | |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 218 | static int itlb_replacement(CPUSH4State * env) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 219 | { |
Andreas Färber | a47dddd | 2013-09-03 17:38:47 +0200 | [diff] [blame] | 220 | if ((env->mmucr & 0xe0000000) == 0xe0000000) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 221 | return 0; |
Andreas Färber | a47dddd | 2013-09-03 17:38:47 +0200 | [diff] [blame] | 222 | } |
| 223 | if ((env->mmucr & 0x98000000) == 0x18000000) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 224 | return 1; |
Andreas Färber | a47dddd | 2013-09-03 17:38:47 +0200 | [diff] [blame] | 225 | } |
| 226 | if ((env->mmucr & 0x54000000) == 0x04000000) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 227 | return 2; |
Andreas Färber | a47dddd | 2013-09-03 17:38:47 +0200 | [diff] [blame] | 228 | } |
| 229 | if ((env->mmucr & 0x2c000000) == 0x00000000) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 230 | return 3; |
Andreas Färber | a47dddd | 2013-09-03 17:38:47 +0200 | [diff] [blame] | 231 | } |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 232 | cpu_abort(env_cpu(env), "Unhandled itlb_replacement"); |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 233 | } |
| 234 | |
| 235 | /* Find the corresponding entry in the right TLB |
| 236 | Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE |
| 237 | */ |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 238 | static int find_tlb_entry(CPUSH4State * env, target_ulong address, |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 239 | tlb_t * entries, uint8_t nbtlb, int use_asid) |
| 240 | { |
| 241 | int match = MMU_DTLB_MISS; |
| 242 | uint32_t start, end; |
| 243 | uint8_t asid; |
| 244 | int i; |
| 245 | |
| 246 | asid = env->pteh & 0xff; |
| 247 | |
| 248 | for (i = 0; i < nbtlb; i++) { |
| 249 | if (!entries[i].v) |
| 250 | continue; /* Invalid entry */ |
aurel32 | eeda677 | 2008-12-10 17:31:51 +0000 | [diff] [blame] | 251 | if (!entries[i].sh && use_asid && entries[i].asid != asid) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 252 | continue; /* Bad ASID */ |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 253 | start = (entries[i].vpn << 10) & ~(entries[i].size - 1); |
| 254 | end = start + entries[i].size - 1; |
| 255 | if (address >= start && address <= end) { /* Match */ |
aurel32 | ea2b542 | 2008-05-09 18:45:55 +0000 | [diff] [blame] | 256 | if (match != MMU_DTLB_MISS) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 257 | return MMU_DTLB_MULTIPLE; /* Multiple match */ |
| 258 | match = i; |
| 259 | } |
| 260 | } |
| 261 | return match; |
| 262 | } |
| 263 | |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 264 | static void increment_urc(CPUSH4State * env) |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 265 | { |
| 266 | uint8_t urb, urc; |
| 267 | |
| 268 | /* Increment URC */ |
| 269 | urb = ((env->mmucr) >> 18) & 0x3f; |
| 270 | urc = ((env->mmucr) >> 10) & 0x3f; |
| 271 | urc++; |
aurel32 | 927e3a4 | 2009-03-03 09:14:01 +0000 | [diff] [blame] | 272 | if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1)) |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 273 | urc = 0; |
| 274 | env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10); |
| 275 | } |
| 276 | |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 277 | /* Copy and utlb entry into itlb |
| 278 | Return entry |
| 279 | */ |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 280 | static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb) |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 281 | { |
| 282 | int itlb; |
| 283 | |
| 284 | tlb_t * ientry; |
| 285 | itlb = itlb_replacement(env); |
| 286 | ientry = &env->itlb[itlb]; |
| 287 | if (ientry->v) { |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 288 | tlb_flush_page(env_cpu(env), ientry->vpn << 10); |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 289 | } |
| 290 | *ientry = env->utlb[utlb]; |
| 291 | update_itlb_use(env, itlb); |
| 292 | return itlb; |
| 293 | } |
| 294 | |
| 295 | /* Find itlb entry |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 296 | Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 297 | */ |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 298 | static int find_itlb_entry(CPUSH4State * env, target_ulong address, |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 299 | int use_asid) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 300 | { |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 301 | int e; |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 302 | |
| 303 | e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid); |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 304 | if (e == MMU_DTLB_MULTIPLE) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 305 | e = MMU_ITLB_MULTIPLE; |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 306 | } else if (e == MMU_DTLB_MISS) { |
aurel32 | ea2b542 | 2008-05-09 18:45:55 +0000 | [diff] [blame] | 307 | e = MMU_ITLB_MISS; |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 308 | } else if (e >= 0) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 309 | update_itlb_use(env, e); |
Aurelien Jarno | 829a492 | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 310 | } |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 311 | return e; |
| 312 | } |
| 313 | |
| 314 | /* Find utlb entry |
| 315 | Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */ |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 316 | static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 317 | { |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 318 | /* per utlb access */ |
| 319 | increment_urc(env); |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 320 | |
| 321 | /* Return entry */ |
| 322 | return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid); |
| 323 | } |
| 324 | |
/* Match address against MMU
   Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
   MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
   MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
   MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
   MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.

   rw: 0 = read, 1 = write, 2 = instruction fetch.
   On MMU_OK, *physical and *prot are filled in; otherwise the caller
   raises the corresponding SH4 exception.
*/
static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
                           int *prot, target_ulong address,
                           int rw, int access_type)
{
    int use_asid, n;
    tlb_t *matching = NULL;

    /* ASID comparison is skipped in privileged mode when MMUCR.SV
       (single virtual memory mode) is set. */
    use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));

    if (rw == 2) {
        /* Instruction fetch: try the ITLB first. */
        n = find_itlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->itlb[n];
            /* In user mode the page must allow user access (PR bit 1). */
            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                n = MMU_ITLB_VIOLATION;
            } else {
                *prot = PAGE_EXEC;
            }
        } else {
            /* ITLB miss: on hardware the entry is refilled from the
               UTLB, so look there and copy the entry across. */
            n = find_utlb_entry(env, address, use_asid);
            if (n >= 0) {
                n = copy_utlb_entry_itlb(env, n);
                matching = &env->itlb[n];
                if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                    n = MMU_ITLB_VIOLATION;
                } else {
                    *prot = PAGE_READ | PAGE_EXEC;
                    /* Writable only if the page allows writes (PR bit 0)
                       and has already been dirtied. */
                    if ((matching->pr & 1) && matching->d) {
                        *prot |= PAGE_WRITE;
                    }
                }
            } else if (n == MMU_DTLB_MULTIPLE) {
                n = MMU_ITLB_MULTIPLE;
            } else if (n == MMU_DTLB_MISS) {
                n = MMU_ITLB_MISS;
            }
        }
    } else {
        /* Data access: translated through the UTLB only. */
        n = find_utlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->utlb[n];
            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                /* User access to a privileged-only page. */
                n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                    MMU_DTLB_VIOLATION_READ;
            } else if ((rw == 1) && !(matching->pr & 1)) {
                /* Write to a read-only page. */
                n = MMU_DTLB_VIOLATION_WRITE;
            } else if ((rw == 1) && !matching->d) {
                /* First write to a clean page: initial page write
                   exception so the OS can set the dirty bit. */
                n = MMU_DTLB_INITIAL_WRITE;
            } else {
                *prot = PAGE_READ;
                if ((matching->pr & 1) && matching->d) {
                    *prot |= PAGE_WRITE;
                }
            }
        } else if (n == MMU_DTLB_MISS) {
            n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
                MMU_DTLB_MISS_READ;
        }
    }
    if (n >= 0) {
        /* Hit: combine the entry's PPN with the in-page offset. */
        n = MMU_OK;
        *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
            (address & (matching->size - 1));
    }
    return n;
}
| 398 | |
Andreas Färber | 73e5716 | 2012-03-14 01:38:22 +0100 | [diff] [blame] | 399 | static int get_physical_address(CPUSH4State * env, target_ulong * physical, |
aurel32 | ef7ec1c | 2009-03-03 06:12:03 +0000 | [diff] [blame] | 400 | int *prot, target_ulong address, |
| 401 | int rw, int access_type) |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 402 | { |
| 403 | /* P1, P2 and P4 areas do not use translation */ |
| 404 | if ((address >= 0x80000000 && address < 0xc0000000) || |
| 405 | address >= 0xe0000000) { |
Aurelien Jarno | 5ed9a25 | 2015-05-25 01:28:56 +0200 | [diff] [blame] | 406 | if (!(env->sr & (1u << SR_MD)) |
Aurelien Jarno | 03e3b61 | 2010-02-03 18:02:55 +0100 | [diff] [blame] | 407 | && (address < 0xe0000000 || address >= 0xe4000000)) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 408 | /* Unauthorized access in user mode (only store queues are available) */ |
Aurelien Jarno | 324189b | 2017-05-17 00:48:18 +0200 | [diff] [blame] | 409 | qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n"); |
aurel32 | cf7055b | 2008-11-21 22:33:15 +0000 | [diff] [blame] | 410 | if (rw == 0) |
| 411 | return MMU_DADDR_ERROR_READ; |
| 412 | else if (rw == 1) |
| 413 | return MMU_DADDR_ERROR_WRITE; |
| 414 | else |
| 415 | return MMU_IADDR_ERROR; |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 416 | } |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 417 | if (address >= 0x80000000 && address < 0xc0000000) { |
| 418 | /* Mask upper 3 bits for P1 and P2 areas */ |
| 419 | *physical = address & 0x1fffffff; |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 420 | } else { |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 421 | *physical = address; |
| 422 | } |
Aurelien Jarno | 5a25cc2 | 2010-02-02 22:32:14 +0100 | [diff] [blame] | 423 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 424 | return MMU_OK; |
| 425 | } |
| 426 | |
| 427 | /* If MMU is disabled, return the corresponding physical page */ |
Aurelien Jarno | 0c16e71 | 2011-01-15 13:50:38 +0100 | [diff] [blame] | 428 | if (!(env->mmucr & MMUCR_AT)) { |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 429 | *physical = address & 0x1FFFFFFF; |
Aurelien Jarno | 5a25cc2 | 2010-02-02 22:32:14 +0100 | [diff] [blame] | 430 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
bellard | fdf9b3e | 2006-04-27 21:07:38 +0000 | [diff] [blame] | 431 | return MMU_OK; |
| 432 | } |
| 433 | |
| 434 | /* We need to resort to the MMU */ |
| 435 | return get_mmu_address(env, physical, prot, address, rw, access_type); |
| 436 | } |
| 437 | |
Andreas Färber | 00b941e | 2013-06-29 18:55:54 +0200 | [diff] [blame] | 438 | hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) |
pbrook | 355fb23 | 2006-06-17 19:58:25 +0000 | [diff] [blame] | 439 | { |
Andreas Färber | 00b941e | 2013-06-29 18:55:54 +0200 | [diff] [blame] | 440 | SuperHCPU *cpu = SUPERH_CPU(cs); |
pbrook | 355fb23 | 2006-06-17 19:58:25 +0000 | [diff] [blame] | 441 | target_ulong physical; |
| 442 | int prot; |
| 443 | |
Andreas Färber | 00b941e | 2013-06-29 18:55:54 +0200 | [diff] [blame] | 444 | get_physical_address(&cpu->env, &physical, &prot, addr, 0, 0); |
pbrook | 355fb23 | 2006-06-17 19:58:25 +0000 | [diff] [blame] | 445 | return physical; |
| 446 | } |
| 447 | |
/*
 * Implement the LDTLB instruction: load the UTLB entry selected by
 * MMUCR.URC from the PTEH/PTEL/PTEA registers.
 */
void cpu_load_tlb(CPUSH4State * env)
{
    CPUState *cs = env_cpu(env);
    int n = cpu_mmucr_urc(env->mmucr);
    tlb_t * entry = &env->utlb[n];

    if (entry->v) {
        /* Overwriting valid entry in utlb. */
        target_ulong address = entry->vpn << 10;
        tlb_flush_page(cs, address);
    }

    /* Take values into cpu status from registers. */
    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
    entry->vpn = cpu_pteh_vpn(env->pteh);
    entry->v = (uint8_t)cpu_ptel_v(env->ptel);
    entry->ppn = cpu_ptel_ppn(env->ptel);
    entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
    /* Decode the 2-bit SZ field into a byte count. */
    switch (entry->sz) {
    case 0: /* 00 */
        entry->size = 1024; /* 1K */
        break;
    case 1: /* 01 */
        entry->size = 1024 * 4; /* 4K */
        break;
    case 2: /* 10 */
        entry->size = 1024 * 64; /* 64K */
        break;
    case 3: /* 11 */
        entry->size = 1024 * 1024; /* 1M */
        break;
    default:
        /* SZ is two bits wide, so this should be unreachable. */
        cpu_abort(cs, "Unhandled load_tlb");
        break;
    }
    /* Remaining attribute bits: shared, cacheable, protection,
       dirty, write-through, space attribute, timing control. */
    entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
    entry->c = (uint8_t)cpu_ptel_c(env->ptel);
    entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
    entry->d = (uint8_t)cpu_ptel_d(env->ptel);
    entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
    entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
    entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
}
| 491 | |
Aurelien Jarno | e0bcb9c | 2010-02-02 19:39:11 +0100 | [diff] [blame] | 492 | void cpu_sh4_invalidate_tlb(CPUSH4State *s) |
| 493 | { |
| 494 | int i; |
| 495 | |
| 496 | /* UTLB */ |
| 497 | for (i = 0; i < UTLB_SIZE; i++) { |
| 498 | tlb_t * entry = &s->utlb[i]; |
| 499 | entry->v = 0; |
| 500 | } |
| 501 | /* ITLB */ |
Alexandre Courbot | e40a67b | 2011-01-25 15:32:01 +0900 | [diff] [blame] | 502 | for (i = 0; i < ITLB_SIZE; i++) { |
| 503 | tlb_t * entry = &s->itlb[i]; |
Aurelien Jarno | e0bcb9c | 2010-02-02 19:39:11 +0100 | [diff] [blame] | 504 | entry->v = 0; |
| 505 | } |
| 506 | |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 507 | tlb_flush(env_cpu(s)); |
Aurelien Jarno | e0bcb9c | 2010-02-02 19:39:11 +0100 | [diff] [blame] | 508 | } |
| 509 | |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 510 | uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s, |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 511 | hwaddr addr) |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 512 | { |
| 513 | int index = (addr & 0x00000300) >> 8; |
| 514 | tlb_t * entry = &s->itlb[index]; |
| 515 | |
| 516 | return (entry->vpn << 10) | |
| 517 | (entry->v << 8) | |
| 518 | (entry->asid); |
| 519 | } |
| 520 | |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 521 | void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr, |
Aurelien Jarno | c0f809c | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 522 | uint32_t mem_value) |
| 523 | { |
| 524 | uint32_t vpn = (mem_value & 0xfffffc00) >> 10; |
| 525 | uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8); |
| 526 | uint8_t asid = (uint8_t)(mem_value & 0x000000ff); |
| 527 | |
Aurelien Jarno | 9f97309 | 2011-01-26 02:07:50 +0100 | [diff] [blame] | 528 | int index = (addr & 0x00000300) >> 8; |
Aurelien Jarno | c0f809c | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 529 | tlb_t * entry = &s->itlb[index]; |
| 530 | if (entry->v) { |
| 531 | /* Overwriting valid entry in itlb. */ |
| 532 | target_ulong address = entry->vpn << 10; |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 533 | tlb_flush_page(env_cpu(s), address); |
Aurelien Jarno | c0f809c | 2011-01-09 23:53:45 +0100 | [diff] [blame] | 534 | } |
| 535 | entry->asid = asid; |
| 536 | entry->vpn = vpn; |
| 537 | entry->v = v; |
| 538 | } |
| 539 | |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 540 | uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s, |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 541 | hwaddr addr) |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 542 | { |
| 543 | int array = (addr & 0x00800000) >> 23; |
| 544 | int index = (addr & 0x00000300) >> 8; |
| 545 | tlb_t * entry = &s->itlb[index]; |
| 546 | |
| 547 | if (array == 0) { |
| 548 | /* ITLB Data Array 1 */ |
| 549 | return (entry->ppn << 10) | |
| 550 | (entry->v << 8) | |
| 551 | (entry->pr << 5) | |
| 552 | ((entry->sz & 1) << 6) | |
| 553 | ((entry->sz & 2) << 4) | |
| 554 | (entry->c << 3) | |
| 555 | (entry->sh << 1); |
| 556 | } else { |
| 557 | /* ITLB Data Array 2 */ |
| 558 | return (entry->tc << 1) | |
| 559 | (entry->sa); |
| 560 | } |
| 561 | } |
| 562 | |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 563 | void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr, |
Aurelien Jarno | 9f97309 | 2011-01-26 02:07:50 +0100 | [diff] [blame] | 564 | uint32_t mem_value) |
| 565 | { |
| 566 | int array = (addr & 0x00800000) >> 23; |
| 567 | int index = (addr & 0x00000300) >> 8; |
| 568 | tlb_t * entry = &s->itlb[index]; |
| 569 | |
| 570 | if (array == 0) { |
| 571 | /* ITLB Data Array 1 */ |
| 572 | if (entry->v) { |
| 573 | /* Overwriting valid entry in utlb. */ |
| 574 | target_ulong address = entry->vpn << 10; |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 575 | tlb_flush_page(env_cpu(s), address); |
Aurelien Jarno | 9f97309 | 2011-01-26 02:07:50 +0100 | [diff] [blame] | 576 | } |
| 577 | entry->ppn = (mem_value & 0x1ffffc00) >> 10; |
| 578 | entry->v = (mem_value & 0x00000100) >> 8; |
| 579 | entry->sz = (mem_value & 0x00000080) >> 6 | |
| 580 | (mem_value & 0x00000010) >> 4; |
| 581 | entry->pr = (mem_value & 0x00000040) >> 5; |
| 582 | entry->c = (mem_value & 0x00000008) >> 3; |
| 583 | entry->sh = (mem_value & 0x00000002) >> 1; |
| 584 | } else { |
| 585 | /* ITLB Data Array 2 */ |
| 586 | entry->tc = (mem_value & 0x00000008) >> 3; |
| 587 | entry->sa = (mem_value & 0x00000007); |
| 588 | } |
| 589 | } |
| 590 | |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 591 | uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s, |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 592 | hwaddr addr) |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 593 | { |
| 594 | int index = (addr & 0x00003f00) >> 8; |
| 595 | tlb_t * entry = &s->utlb[index]; |
| 596 | |
| 597 | increment_urc(s); /* per utlb access */ |
| 598 | |
| 599 | return (entry->vpn << 10) | |
| 600 | (entry->v << 8) | |
| 601 | (entry->asid); |
| 602 | } |
| 603 | |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 604 | void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr, |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 605 | uint32_t mem_value) |
| 606 | { |
| 607 | int associate = addr & 0x0000080; |
| 608 | uint32_t vpn = (mem_value & 0xfffffc00) >> 10; |
| 609 | uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9); |
| 610 | uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8); |
| 611 | uint8_t asid = (uint8_t)(mem_value & 0x000000ff); |
Aurelien Jarno | 5ed9a25 | 2015-05-25 01:28:56 +0200 | [diff] [blame] | 612 | int use_asid = !(s->mmucr & MMUCR_SV) || !(s->sr & (1u << SR_MD)); |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 613 | |
| 614 | if (associate) { |
| 615 | int i; |
| 616 | tlb_t * utlb_match_entry = NULL; |
| 617 | int needs_tlb_flush = 0; |
| 618 | |
| 619 | /* search UTLB */ |
| 620 | for (i = 0; i < UTLB_SIZE; i++) { |
| 621 | tlb_t * entry = &s->utlb[i]; |
| 622 | if (!entry->v) |
| 623 | continue; |
| 624 | |
aurel32 | eeda677 | 2008-12-10 17:31:51 +0000 | [diff] [blame] | 625 | if (entry->vpn == vpn |
| 626 | && (!use_asid || entry->asid == asid || entry->sh)) { |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 627 | if (utlb_match_entry) { |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 628 | CPUState *cs = env_cpu(s); |
Andreas Färber | 2710342 | 2013-08-26 08:31:06 +0200 | [diff] [blame] | 629 | |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 630 | /* Multiple TLB Exception */ |
Andreas Färber | 2710342 | 2013-08-26 08:31:06 +0200 | [diff] [blame] | 631 | cs->exception_index = 0x140; |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 632 | s->tea = addr; |
| 633 | break; |
| 634 | } |
| 635 | if (entry->v && !v) |
| 636 | needs_tlb_flush = 1; |
| 637 | entry->v = v; |
| 638 | entry->d = d; |
| 639 | utlb_match_entry = entry; |
| 640 | } |
| 641 | increment_urc(s); /* per utlb access */ |
| 642 | } |
| 643 | |
| 644 | /* search ITLB */ |
| 645 | for (i = 0; i < ITLB_SIZE; i++) { |
| 646 | tlb_t * entry = &s->itlb[i]; |
aurel32 | eeda677 | 2008-12-10 17:31:51 +0000 | [diff] [blame] | 647 | if (entry->vpn == vpn |
| 648 | && (!use_asid || entry->asid == asid || entry->sh)) { |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 649 | if (entry->v && !v) |
| 650 | needs_tlb_flush = 1; |
| 651 | if (utlb_match_entry) |
| 652 | *entry = *utlb_match_entry; |
| 653 | else |
| 654 | entry->v = v; |
| 655 | break; |
| 656 | } |
| 657 | } |
| 658 | |
Andreas Färber | 31b030d | 2013-09-04 01:29:02 +0200 | [diff] [blame] | 659 | if (needs_tlb_flush) { |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 660 | tlb_flush_page(env_cpu(s), vpn << 10); |
Andreas Färber | 31b030d | 2013-09-04 01:29:02 +0200 | [diff] [blame] | 661 | } |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 662 | } else { |
| 663 | int index = (addr & 0x00003f00) >> 8; |
| 664 | tlb_t * entry = &s->utlb[index]; |
| 665 | if (entry->v) { |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 666 | CPUState *cs = env_cpu(s); |
Andreas Färber | 31b030d | 2013-09-04 01:29:02 +0200 | [diff] [blame] | 667 | |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 668 | /* Overwriting valid entry in utlb. */ |
| 669 | target_ulong address = entry->vpn << 10; |
Andreas Färber | 31b030d | 2013-09-04 01:29:02 +0200 | [diff] [blame] | 670 | tlb_flush_page(cs, address); |
aurel32 | 29e179b | 2008-08-22 08:57:43 +0000 | [diff] [blame] | 671 | } |
| 672 | entry->asid = asid; |
| 673 | entry->vpn = vpn; |
| 674 | entry->d = d; |
| 675 | entry->v = v; |
| 676 | increment_urc(s); |
| 677 | } |
| 678 | } |
| 679 | |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 680 | uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s, |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 681 | hwaddr addr) |
Aurelien Jarno | bc656a2 | 2011-01-26 02:16:39 +0100 | [diff] [blame] | 682 | { |
| 683 | int array = (addr & 0x00800000) >> 23; |
| 684 | int index = (addr & 0x00003f00) >> 8; |
| 685 | tlb_t * entry = &s->utlb[index]; |
| 686 | |
| 687 | increment_urc(s); /* per utlb access */ |
| 688 | |
| 689 | if (array == 0) { |
| 690 | /* ITLB Data Array 1 */ |
| 691 | return (entry->ppn << 10) | |
| 692 | (entry->v << 8) | |
| 693 | (entry->pr << 5) | |
| 694 | ((entry->sz & 1) << 6) | |
| 695 | ((entry->sz & 2) << 4) | |
| 696 | (entry->c << 3) | |
| 697 | (entry->d << 2) | |
| 698 | (entry->sh << 1) | |
| 699 | (entry->wt); |
| 700 | } else { |
| 701 | /* ITLB Data Array 2 */ |
| 702 | return (entry->tc << 1) | |
| 703 | (entry->sa); |
| 704 | } |
| 705 | } |
| 706 | |
Avi Kivity | a8170e5 | 2012-10-23 12:30:10 +0200 | [diff] [blame] | 707 | void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr, |
Aurelien Jarno | 9f97309 | 2011-01-26 02:07:50 +0100 | [diff] [blame] | 708 | uint32_t mem_value) |
| 709 | { |
| 710 | int array = (addr & 0x00800000) >> 23; |
| 711 | int index = (addr & 0x00003f00) >> 8; |
| 712 | tlb_t * entry = &s->utlb[index]; |
| 713 | |
| 714 | increment_urc(s); /* per utlb access */ |
| 715 | |
| 716 | if (array == 0) { |
| 717 | /* UTLB Data Array 1 */ |
| 718 | if (entry->v) { |
| 719 | /* Overwriting valid entry in utlb. */ |
| 720 | target_ulong address = entry->vpn << 10; |
Richard Henderson | dad1c8e | 2019-03-22 19:26:42 -0700 | [diff] [blame] | 721 | tlb_flush_page(env_cpu(s), address); |
Aurelien Jarno | 9f97309 | 2011-01-26 02:07:50 +0100 | [diff] [blame] | 722 | } |
| 723 | entry->ppn = (mem_value & 0x1ffffc00) >> 10; |
| 724 | entry->v = (mem_value & 0x00000100) >> 8; |
| 725 | entry->sz = (mem_value & 0x00000080) >> 6 | |
| 726 | (mem_value & 0x00000010) >> 4; |
| 727 | entry->pr = (mem_value & 0x00000060) >> 5; |
| 728 | entry->c = (mem_value & 0x00000008) >> 3; |
| 729 | entry->d = (mem_value & 0x00000004) >> 2; |
| 730 | entry->sh = (mem_value & 0x00000002) >> 1; |
| 731 | entry->wt = (mem_value & 0x00000001); |
| 732 | } else { |
| 733 | /* UTLB Data Array 2 */ |
| 734 | entry->tc = (mem_value & 0x00000008) >> 3; |
| 735 | entry->sa = (mem_value & 0x00000007); |
| 736 | } |
| 737 | } |
| 738 | |
edgar_igl | 852d481 | 2009-04-01 23:10:46 +0000 | [diff] [blame] | 739 | int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr) |
| 740 | { |
| 741 | int n; |
Aurelien Jarno | 5ed9a25 | 2015-05-25 01:28:56 +0200 | [diff] [blame] | 742 | int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD)); |
edgar_igl | 852d481 | 2009-04-01 23:10:46 +0000 | [diff] [blame] | 743 | |
| 744 | /* check area */ |
Aurelien Jarno | 5ed9a25 | 2015-05-25 01:28:56 +0200 | [diff] [blame] | 745 | if (env->sr & (1u << SR_MD)) { |
Veres Lajos | 67cc32e | 2015-09-08 22:45:14 +0100 | [diff] [blame] | 746 | /* For privileged mode, P2 and P4 area is not cacheable. */ |
edgar_igl | 852d481 | 2009-04-01 23:10:46 +0000 | [diff] [blame] | 747 | if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr) |
| 748 | return 0; |
| 749 | } else { |
Veres Lajos | 67cc32e | 2015-09-08 22:45:14 +0100 | [diff] [blame] | 750 | /* For user mode, only U0 area is cacheable. */ |
edgar_igl | 852d481 | 2009-04-01 23:10:46 +0000 | [diff] [blame] | 751 | if (0x80000000 <= addr) |
| 752 | return 0; |
| 753 | } |
| 754 | |
| 755 | /* |
| 756 | * TODO : Evaluate CCR and check if the cache is on or off. |
| 757 | * Now CCR is not in CPUSH4State, but in SH7750State. |
Dong Xu Wang | 4abf79a | 2011-11-22 18:06:21 +0800 | [diff] [blame] | 758 | * When you move the ccr into CPUSH4State, the code will be |
edgar_igl | 852d481 | 2009-04-01 23:10:46 +0000 | [diff] [blame] | 759 | * as follows. |
| 760 | */ |
| 761 | #if 0 |
| 762 | /* check if operand cache is enabled or not. */ |
| 763 | if (!(env->ccr & 1)) |
| 764 | return 0; |
| 765 | #endif |
| 766 | |
| 767 | /* if MMU is off, no check for TLB. */ |
| 768 | if (env->mmucr & MMUCR_AT) |
| 769 | return 1; |
| 770 | |
| 771 | /* check TLB */ |
| 772 | n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid); |
| 773 | if (n >= 0) |
| 774 | return env->itlb[n].c; |
| 775 | |
| 776 | n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid); |
| 777 | if (n >= 0) |
| 778 | return env->utlb[n].c; |
| 779 | |
| 780 | return 0; |
| 781 | } |
| 782 | |
pbrook | 355fb23 | 2006-06-17 19:58:25 +0000 | [diff] [blame] | 783 | #endif |
Richard Henderson | f47ede1 | 2014-09-13 09:45:23 -0700 | [diff] [blame] | 784 | |
| 785 | bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request) |
| 786 | { |
| 787 | if (interrupt_request & CPU_INTERRUPT_HARD) { |
Aurelien Jarno | 5c6f3eb | 2017-05-17 00:48:18 +0200 | [diff] [blame] | 788 | SuperHCPU *cpu = SUPERH_CPU(cs); |
| 789 | CPUSH4State *env = &cpu->env; |
| 790 | |
| 791 | /* Delay slots are indivisible, ignore interrupts */ |
| 792 | if (env->flags & DELAY_SLOT_MASK) { |
| 793 | return false; |
| 794 | } else { |
| 795 | superh_cpu_do_interrupt(cs); |
| 796 | return true; |
| 797 | } |
Richard Henderson | f47ede1 | 2014-09-13 09:45:23 -0700 | [diff] [blame] | 798 | } |
| 799 | return false; |
| 800 | } |
Richard Henderson | f98bce2 | 2019-04-02 22:18:39 +0700 | [diff] [blame] | 801 | |
/*
 * Softmmu fault handler: translate ADDRESS and install the mapping in
 * the QEMU TLB, or raise the architectural MMU exception.  Returns true
 * on success.  When PROBE is set, returns false on a fault instead of
 * raising; otherwise this function does not return on failure
 * (cpu_loop_exit_restore longjmps back to the main loop).
 */
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                         MMUAccessType access_type, int mmu_idx,
                         bool probe, uintptr_t retaddr)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int ret;

#ifdef CONFIG_USER_ONLY
    /* User-mode emulation has no MMU: every fault is reported as the
       matching access violation. */
    ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE :
           access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION :
           MMU_DTLB_VIOLATION_READ);
#else
    target_ulong physical;
    int prot, sh_access_type;

    sh_access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, sh_access_type);

    if (ret == MMU_OK) {
        address &= TARGET_PAGE_MASK;
        physical &= TARGET_PAGE_MASK;
        tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /* Latch the faulting VPN into PTEH, except for multiple-hit faults. */
    if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
        env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
    }
#endif

    env->tea = address;
    /* Map the MMU fault code to the SH4 exception vector offset. */
    switch (ret) {
    case MMU_ITLB_MISS:
    case MMU_DTLB_MISS_READ:
        cs->exception_index = 0x040;
        break;
    case MMU_DTLB_MULTIPLE:
    case MMU_ITLB_MULTIPLE:
        cs->exception_index = 0x140;
        break;
    case MMU_ITLB_VIOLATION:
        cs->exception_index = 0x0a0;
        break;
    case MMU_DTLB_MISS_WRITE:
        cs->exception_index = 0x060;
        break;
    case MMU_DTLB_INITIAL_WRITE:
        cs->exception_index = 0x080;
        break;
    case MMU_DTLB_VIOLATION_READ:
        cs->exception_index = 0x0a0;
        break;
    case MMU_DTLB_VIOLATION_WRITE:
        cs->exception_index = 0x0c0;
        break;
    case MMU_IADDR_ERROR:
    case MMU_DADDR_ERROR_READ:
        cs->exception_index = 0x0e0;
        break;
    case MMU_DADDR_ERROR_WRITE:
        cs->exception_index = 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled MMU fault");
    }
    cpu_loop_exit_restore(cs, retaddr);
}