/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PMP (Physical Memory Protection) is as yet unused and needs testing.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"

static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
    uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
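
/*
 * For reference (priv spec v1.10), each 8-bit pmpcfg field is laid out as:
 *   bit 0    R - read permission
 *   bit 1    W - write permission
 *   bit 2    X - execute permission
 *   bits 3-4 A - address matching mode:
 *                0 = OFF, 1 = TOR, 2 = NA4, 3 = NAPOT
 *   bits 5-6   - reserved (WARL, read as 0)
 *   bit 7    L - lock bit
 * For example, cfg = 0x9f (L=1, A=NAPOT, X=W=R=1) gives
 * pmp_get_a_field(0x9f) == (0x9f >> 3) & 0x3 == 3 == PMP_AMATCH_NAPOT.
 */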

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    /*
     * In TOR mode we must also check the lock bit of the next PMP entry
     * (if there is one): pmpaddr[i] forms the base of entry i + 1's TOR
     * range, so a locked TOR entry also locks the address register below it.
     */
    const uint8_t a_field =
        pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
    if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
        (PMP_AMATCH_TOR == a_field)) {
        return 1;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
static inline uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}

/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds-checks the index and honours the relevant lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, pmp_index)) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01  16-byte NAPOT range
     * aaaa...a011  32-byte NAPOT range
     * ...
     * aa01...1111  2^XLEN-byte NAPOT range
     * a011...1111  2^(XLEN+1)-byte NAPOT range
     * 0111...1111  2^(XLEN+2)-byte NAPOT range
     * 1111...1111  Reserved
     */
    if (a == -1) {
        *sa = 0u;
        *ea = -1;
        return;
    } else {
        target_ulong t1 = ctz64(~a);
        target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
        target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
        *sa = base;
        *ea = base + range;
    }
}
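
/*
 * Worked example of the NAPOT decoding above: pmpaddr = 0x20000fff has
 * t1 = 12 trailing ones, so it encodes a 2^(12+3) = 32 KiB region:
 *   base  = (0x20000fff & ~0xfff) << 2 = 0x20000000 << 2 = 0x80000000
 *   range = (1 << 15) - 1              = 0x7fff
 * giving sa = 0x80000000 and ea = 0x80007fff.
 */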

/*
 * Convert the cfg/addr reg values for this entry into simple 'sa' (start
 * address) and 'ea' (end address) values.
 * This function is called relatively infrequently whereas the check that
 * an address is within a pmp rule is called often, so optimise that one.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    int i;

    env->pmp_state.num_rules = 0;

    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}
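
/*
 * TOR example for the decoding above: with pmpaddr0 = 0x20000000 and
 * pmpaddr1 = 0x20001000, a TOR rule in entry 1 covers the byte range
 * [pmpaddr0 << 2, (pmpaddr1 << 2) - 1] = [0x80000000, 0x80003fff],
 * i.e. 16 KiB, since pmpaddr registers hold bits [xx+2:2] of the address.
 */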

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa)
        && (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, target_ulong mode)
{
    int i = 0;
    int ret = -1;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;
    pmp_priv_t allowed_privs = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return true;
    }

    if (size == 0) {
        if (riscv_feature(env, RISCV_FEATURE_MMU)) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             * TARGET_PAGE_MASK has only the page-offset bits clear, so
             * -(addr | TARGET_PAGE_MASK) is the number of bytes left in
             * addr's page, e.g. -(0x1234 | ~0xfff) = 0x1000 - 0x234.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            ret = 0;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * If the PMP entry is not off and the address is in range, do the priv
         * check
         */
        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
            }

            if ((privs & allowed_privs) == privs) {
                ret = 1;
                break;
            } else {
                ret = 0;
                break;
            }
        }
    }

    /* No rule matched */
    if (ret == -1) {
        if (mode == PRV_M) {
            /*
             * Privileged spec v1.10 states that if no PMP entry matches an
             * M-mode access, the access succeeds.
             */
            ret = 1;
        } else {
            /*
             * Other modes are not allowed to succeed if they don't match a
             * rule, but there are rules. We've checked for no rule earlier
             * in this function.
             */
            ret = 0;
        }
    }

    return ret == 1 ? true : false;
}
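
/*
 * Permission-check example for pmp_hart_has_privs() above: take an entry
 * with cfg = 0x1b (A=NAPOT, R=1, W=1, X=0, L=0) that fully covers the
 * access. An S-mode fetch (privs = PMP_EXEC) masks allowed_privs down to
 * PMP_READ | PMP_WRITE and is denied; an M-mode fetch skips the mask
 * (entry not locked) and is allowed.
 */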

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val)
{
    int i;
    uint8_t cfg_val;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - incorrect address\n");
        return;
    }

    for (i = 0; i < sizeof(target_ulong); i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;

    for (i = 0; i < sizeof(target_ulong); i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    if (addr_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Calculate the TLB size if the start address or the end address of
 * the PMP entry is present in the TLB page.
 */
static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
                                     target_ulong tlb_sa, target_ulong tlb_ea)
{
    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;

    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
        return pmp_ea - pmp_sa + 1;
    }

    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
        return tlb_ea - pmp_sa + 1;
    }

    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
        return pmp_ea - tlb_sa + 1;
    }

    return 0;
}
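
/*
 * The three cases above, illustrated:
 *   PMP inside page:     tlb_sa <= pmp_sa, pmp_ea <= tlb_ea -> pmp_ea - pmp_sa + 1
 *   PMP crosses page end: pmp_sa in page, pmp_ea beyond     -> tlb_ea - pmp_sa + 1
 *   PMP crosses page start: pmp_ea in page, pmp_sa before   -> pmp_ea - tlb_sa + 1
 * e.g. with tlb_sa = 0x80000000, tlb_ea = 0x80000fff, pmp_sa = 0x80000800
 * and pmp_ea = 0x80001fff, the second case applies and yields 0x800 bytes.
 */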

/*
 * Check whether there is a PMP entry whose range covers this page. If so,
 * try to find the minimum granularity for the TLB size.
 */
bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
                         target_ulong *tlb_size)
{
    int i;
    target_ulong val;
    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
        if (val) {
            if (*tlb_size == 0 || *tlb_size > val) {
                *tlb_size = val;
            }
        }
    }

    if (*tlb_size != 0) {
        return true;
    }

    return false;
}