/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"


static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
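
/*
 * Illustrative example (not used by the code above): with exclude = 0x0005
 * (tags 0 and 2 excluded), start tag = 0 and offset = 2, the walk advances
 * 0 -> 1 (first non-excluded tag) and then 1 -> 3 (skipping the excluded
 * tag 2), returning 3. With offset = 0 the start tag itself is returned
 * unless it is excluded, in which case the next non-excluded tag is chosen.
 */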

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte. Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address. This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe. The page is expected to be
     * valid. Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page. E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL. If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region. For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
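
/*
 * A note on sizing (illustrative, not used by the code above): one tag
 * nibble covers a TAG_GRANULE (16 bytes) of normal memory, so one byte of
 * tag storage covers 32 bytes of data. A board that exposes Tagged Normal
 * memory therefore needs a tag-RAM region 1/32 the size of the
 * corresponding normal RAM, e.g. 128 MiB of tag RAM for 4 GiB of data.
 */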

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm. Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function. So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
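
/*
 * Worked example of the RandomTag loop above (illustrative): with
 * seed = 0x0001, the first NextRandomTagBit step gives top = 1 and
 * seed = 0x8000; the next three steps all give top = 0, so offset = 1
 * and the shifted-down seed 0x1000 is written back to RGSR_EL1.SEED.
 */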

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}
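
/*
 * Illustrative: bit LOG2_TAG_GRANULE (bit 4) of the address selects the
 * nibble, matching the layout described for allocation_tag_mem. E.g. for
 * ptr = 0x...20 the tag is read from bits [3:0] of the tag byte, while
 * for ptr = 0x...30 it is read from bits [7:4] of the same byte.
 */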

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
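
/*
 * Design note (illustrative): two adjacent 16-byte granules share a single
 * tag byte, so a plain read-modify-write of one nibble can race with a
 * concurrent STG to the neighbouring granule. The compare-and-swap loop
 * above retries until the byte is updated without losing the other nibble.
 */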

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page (or pages).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
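
/*
 * Illustrative: an ST2G at ptr = 0x...10 (aligned to 16 but not to 32)
 * updates the high nibble of one tag byte and the low nibble of the next,
 * hence the two separate store1 calls above; an ST2G at ptr = 0x...20
 * covers exactly one tag byte, which is written in a single atomic store.
 */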

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)
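
/*
 * With GMID_EL1_BS == 6 (asserted below), LDGM_STGM_SIZE is 256 bytes,
 * i.e. 16 tag granules. The 16 four-bit tags occupy 8 tag bytes, which
 * is why a single 64-bit load/store of tag memory suffices below.
 */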

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags. The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * The tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags. The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
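
/*
 * Illustrative: for a 64-byte DC ZVA block, tag_bytes is 2; a tag value
 * of 0xa becomes tag_pair 0xaa, and the memset writes both tag bytes,
 * tagging all four granules of the zeroed block in one go.
 */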

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal". But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags. SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair. An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
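
/*
 * Worked example (illustrative): checking 3 granules starting at an odd
 * nibble against cmp = 0x3, with tag memory bytes {0x33, 0x53}. The first
 * byte's odd nibble matches (n = 1), the second byte's even nibble matches
 * (n = 2), but its odd nibble is 5 != 3, so checkN returns 2 < count and
 * the failure lies in the third granule checked.
 */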

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: return address for exception handling, or 0 for mte_probe
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule. For the first granule, the
     * failure address is @ptr, the first byte accessed. Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
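
/*
 * Worked example (illustrative): a 16-byte access at ptr = 0x...1008 has
 * ptr_last = 0x...1017, so tag_first = 0x...1000, tag_last = 0x...1010 and
 * tag_count = 2. Both granules share the tag byte pair starting at
 * 0x...1000 (tag_byte_first == tag_byte_last), giving tag_size = 1, and
 * checkN starts at the even nibble since ptr & TAG_GRANULE == 0.
 */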

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault. When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access. With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed. This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page. DC_ZVA requires that we supply
     * the original pointer for an invalid page. But watchpoints require
     * that we probe the actual space. So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128. For user-only, aarch64_max_initfn will set the
     * block size to 512. Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}