| /* |
| * SPDX-License-Identifier: BSD-2-Clause |
| * |
| * Copyright (c) 2019 Western Digital Corporation or its affiliates. |
| * |
| * Authors: |
| * Anup Patel <anup.patel@wdc.com> |
| */ |
| |
| #include <sbi/riscv_asm.h> |
| #include <sbi/riscv_barrier.h> |
| #include <sbi/riscv_encoding.h> |
| #include <sbi/riscv_fp.h> |
| #include <sbi/sbi_bitops.h> |
| #include <sbi/sbi_console.h> |
| #include <sbi/sbi_domain.h> |
| #include <sbi/sbi_csr_detect.h> |
| #include <sbi/sbi_error.h> |
| #include <sbi/sbi_hart.h> |
| #include <sbi/sbi_math.h> |
| #include <sbi/sbi_platform.h> |
| #include <sbi/sbi_pmu.h> |
| #include <sbi/sbi_string.h> |
| #include <sbi/sbi_trap.h> |
| #include <sbi/sbi_hfence.h> |
| |
| extern void __sbi_expected_trap(void); |
| extern void __sbi_expected_trap_hext(void); |
| |
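| /* |
| * Expected-trap handler used together with the csr_read_allowed() / |
| * csr_write_allowed() probing helpers; switched to the hext variant |
| * during cold boot when the hypervisor extension is present. |
| */ |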
| void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap; |
| |
| static unsigned long hart_features_offset; |
| |
| static void mstatus_init(struct sbi_scratch *scratch) |
| { |
| int cidx; |
| unsigned long mstatus_val = 0; |
| unsigned int mhpm_mask = sbi_hart_mhpm_mask(scratch); |
| uint64_t mhpmevent_init_val = 0; |
| uint64_t menvcfg_val, mstateen_val; |
| |
| /* Enable FPU */ |
| if (misa_extension('D') || misa_extension('F')) |
| mstatus_val |= MSTATUS_FS; |
| |
| /* Enable Vector context */ |
| if (misa_extension('V')) |
| mstatus_val |= MSTATUS_VS; |
| |
| csr_write(CSR_MSTATUS, mstatus_val); |
| |
| /* Disable user mode usage of all perf counters except default ones (CY, TM, IR) */ |
| if (misa_extension('S') && |
| sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10) |
| csr_write(CSR_SCOUNTEREN, 7); |
| |
| /** |
| * OpenSBI doesn't use any PMU counters in M-mode. |
| * Supervisor-mode access to all counters is enabled by default, but |
| * a counter only runs once its mcountinhibit bit is cleared. |
| */ |
| if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10) |
| csr_write(CSR_MCOUNTEREN, -1); |
| |
| /* |
| * Keep the programmable counters (mhpmcounter3-31) inhibited for now; |
| * they start running at runtime once S-mode requests them. CY and IR |
| * are left uninhibited. |
| */ |
| if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11) |
| csr_write(CSR_MCOUNTINHIBIT, 0xFFFFFFF8); |
| |
| /** |
| * Each mhpmevent[n] (and mhpmevent[n]h on RV32) CSR is initialized with |
| * the overflow interrupt disabled and counting inhibited in M-mode. |
| */ |
| mhpmevent_init_val |= (MHPMEVENT_OF | MHPMEVENT_MINH); |
| for (cidx = 0; cidx <= 28; cidx++) { |
| if (!(mhpm_mask & 1 << (cidx + 3))) |
| continue; |
| #if __riscv_xlen == 32 |
| csr_write_num(CSR_MHPMEVENT3 + cidx, |
| mhpmevent_init_val & 0xFFFFFFFF); |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF)) |
| csr_write_num(CSR_MHPMEVENT3H + cidx, |
| mhpmevent_init_val >> BITS_PER_LONG); |
| #else |
| csr_write_num(CSR_MHPMEVENT3 + cidx, mhpmevent_init_val); |
| #endif |
| } |
| |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMSTATEEN)) { |
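| /* |
| * Assemble the full 64-bit mstateen0 value; on RV32 the upper |
| * half lives in the separate MSTATEEN0H CSR. |
| */ |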
| mstateen_val = csr_read(CSR_MSTATEEN0); |
| #if __riscv_xlen == 32 |
| mstateen_val |= ((uint64_t)csr_read(CSR_MSTATEEN0H)) << 32; |
| #endif |
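| /* |
| * Allow the next lower privilege level to access the state gated |
| * by mstateen0: the lower-level *stateen CSRs, the Sdtrig context |
| * CSRs and the henvcfg/senvcfg CSRs. |
| */ |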
| mstateen_val |= SMSTATEEN_STATEN; |
| mstateen_val |= SMSTATEEN0_CONTEXT; |
| mstateen_val |= SMSTATEEN0_HSENVCFG; |
| |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMAIA)) |
| mstateen_val |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC); |
| else |
| mstateen_val &= ~(SMSTATEEN0_AIA | SMSTATEEN0_IMSIC); |
| |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMAIA) || |
| sbi_hart_has_extension(scratch, SBI_HART_EXT_SMCSRIND)) |
| mstateen_val |= (SMSTATEEN0_SVSLCT); |
| else |
| mstateen_val &= ~(SMSTATEEN0_SVSLCT); |
| |
| csr_write(CSR_MSTATEEN0, mstateen_val); |
| #if __riscv_xlen == 32 |
| csr_write(CSR_MSTATEEN0H, mstateen_val >> 32); |
| #endif |
| } |
| |
| if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) { |
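| /* menvcfg (and menvcfgh on RV32) was introduced in priv spec v1.12 */ |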
| menvcfg_val = csr_read(CSR_MENVCFG); |
| #if __riscv_xlen == 32 |
| menvcfg_val |= ((uint64_t)csr_read(CSR_MENVCFGH)) << 32; |
| #endif |
| |
| #define __set_menvcfg_ext(__ext, __bits) \ |
| if (sbi_hart_has_extension(scratch, __ext)) \ |
| menvcfg_val |= __bits; |
| |
| /* |
| * Enable access to extensions if they are present in the |
| * hardware or in the device tree. |
| */ |
| |
| __set_menvcfg_ext(SBI_HART_EXT_ZICBOZ, ENVCFG_CBZE) |
| __set_menvcfg_ext(SBI_HART_EXT_ZICBOM, ENVCFG_CBCFE) |
| __set_menvcfg_ext(SBI_HART_EXT_ZICBOM, |
| ENVCFG_CBIE_INV << ENVCFG_CBIE_SHIFT) |
| #if __riscv_xlen > 32 |
| __set_menvcfg_ext(SBI_HART_EXT_SVPBMT, ENVCFG_PBMTE) |
| #endif |
| __set_menvcfg_ext(SBI_HART_EXT_SSTC, ENVCFG_STCE) |
| __set_menvcfg_ext(SBI_HART_EXT_SMCDELEG, ENVCFG_CDE) |
| __set_menvcfg_ext(SBI_HART_EXT_SVADU, ENVCFG_ADUE) |
| |
| #undef __set_menvcfg_ext |
| |
| /* |
| * When both Svade and Svadu are present in the DT, the default scheme |
| * for managing the PTE A/D bits should be Svade. Svadu is checked |
| * before Svade above so that the ADUE bit is cleared whenever Svade |
| * support is specified. |
| */ |
| |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SVADE)) |
| menvcfg_val &= ~ENVCFG_ADUE; |
| |
| csr_write(CSR_MENVCFG, menvcfg_val); |
| #if __riscv_xlen == 32 |
| csr_write(CSR_MENVCFGH, menvcfg_val >> 32); |
| #endif |
| |
| /* Enable S-mode access to seed CSR */ |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_ZKR)) { |
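| /* |
| * SSEED allows S-mode to read the seed CSR while clearing USEED |
| * keeps it inaccessible from U-mode. |
| */ |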
| csr_set(CSR_MSECCFG, MSECCFG_SSEED); |
| csr_clear(CSR_MSECCFG, MSECCFG_USEED); |
| } |
| } |
| |
| /* Disable all interrupts */ |
| csr_write(CSR_MIE, 0); |
| |
| /* Disable S-mode paging */ |
| if (misa_extension('S')) |
| csr_write(CSR_SATP, 0); |
| } |
| |
| static int fp_init(struct sbi_scratch *scratch) |
| { |
| #ifdef __riscv_flen |
| int i; |
| #endif |
| |
| if (!misa_extension('D') && !misa_extension('F')) |
| return 0; |
| |
| if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS)) |
| return SBI_EINVAL; |
| |
| #ifdef __riscv_flen |
| for (i = 0; i < 32; i++) |
| init_fp_reg(i); |
| csr_write(CSR_FCSR, 0); |
| #endif |
| |
| return 0; |
| } |
| |
| static int delegate_traps(struct sbi_scratch *scratch) |
| { |
| const struct sbi_platform *plat = sbi_platform_ptr(scratch); |
| unsigned long interrupts, exceptions; |
| |
| if (!misa_extension('S')) |
| /* No delegation possible as mideleg does not exist */ |
| return 0; |
| |
| /* Send M-mode interrupts and most exceptions to S-mode */ |
| interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP; |
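| /* Also delegate the PMU counter overflow interrupt, if any */ |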
| interrupts |= sbi_pmu_irq_bit(); |
| |
| exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) | |
| (1U << CAUSE_USER_ECALL); |
| if (sbi_platform_has_mfaults_delegation(plat)) |
| exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) | |
| (1U << CAUSE_LOAD_PAGE_FAULT) | |
| (1U << CAUSE_STORE_PAGE_FAULT); |
| |
| /* |
| * If the hypervisor extension is available then we only handle |
| * hypervisor calls (i.e. ecalls from HS-mode) in M-mode. |
| * |
| * HS-mode will additionally handle supervisor calls (i.e. ecalls |
| * from VS-mode), guest page faults and virtual interrupts. |
| */ |
| if (misa_extension('H')) { |
| exceptions |= (1U << CAUSE_VIRTUAL_SUPERVISOR_ECALL); |
| exceptions |= (1U << CAUSE_FETCH_GUEST_PAGE_FAULT); |
| exceptions |= (1U << CAUSE_LOAD_GUEST_PAGE_FAULT); |
| exceptions |= (1U << CAUSE_VIRTUAL_INST_FAULT); |
| exceptions |= (1U << CAUSE_STORE_GUEST_PAGE_FAULT); |
| } |
| |
| csr_write(CSR_MIDELEG, interrupts); |
| csr_write(CSR_MEDELEG, exceptions); |
| |
| return 0; |
| } |
| |
| void sbi_hart_delegation_dump(struct sbi_scratch *scratch, |
| const char *prefix, const char *suffix) |
| { |
| if (!misa_extension('S')) |
| /* No delegation possible as mideleg does not exist */ |
| return; |
| |
| sbi_printf("%sMIDELEG%s: 0x%" PRILX "\n", |
| prefix, suffix, csr_read(CSR_MIDELEG)); |
| sbi_printf("%sMEDELEG%s: 0x%" PRILX "\n", |
| prefix, suffix, csr_read(CSR_MEDELEG)); |
| } |
| |
| unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| return hfeatures->mhpm_mask; |
| } |
| |
| unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| return hfeatures->pmp_count; |
| } |
| |
| unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| return hfeatures->pmp_log2gran; |
| } |
| |
| unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| return hfeatures->pmp_addr_bits; |
| } |
| |
| unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| return hfeatures->mhpm_bits; |
| } |
| |
| /* |
| * Returns Smepmp flags for a given domain and region based on permissions. |
| */ |
| static unsigned int sbi_hart_get_smepmp_flags(struct sbi_scratch *scratch, |
| struct sbi_domain *dom, |
| struct sbi_domain_memregion *reg) |
| { |
| unsigned int pmp_flags = 0; |
| |
| if (SBI_DOMAIN_MEMREGION_IS_SHARED(reg->flags)) { |
| /* Read only for both M and SU modes */ |
| if (SBI_DOMAIN_MEMREGION_IS_SUR_MR(reg->flags)) |
| pmp_flags = (PMP_L | PMP_R | PMP_W | PMP_X); |
| |
| /* Execute for SU but Read/Execute for M mode */ |
| else if (SBI_DOMAIN_MEMREGION_IS_SUX_MRX(reg->flags)) |
| /* locked region */ |
| pmp_flags = (PMP_L | PMP_W | PMP_X); |
| |
| /* Execute only for both M and SU modes */ |
| else if (SBI_DOMAIN_MEMREGION_IS_SUX_MX(reg->flags)) |
| pmp_flags = (PMP_L | PMP_W); |
| |
| /* Read/Write for both M and SU modes */ |
| else if (SBI_DOMAIN_MEMREGION_IS_SURW_MRW(reg->flags)) |
| pmp_flags = (PMP_W | PMP_X); |
| |
| /* Read only for SU mode but Read/Write for M mode */ |
| else if (SBI_DOMAIN_MEMREGION_IS_SUR_MRW(reg->flags)) |
| pmp_flags = (PMP_W); |
| } else if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) { |
| /* |
| * When Smepmp is supported and used, an M-mode-only region cannot |
| * have RWX permissions. |
| */ |
| if ((reg->flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK) |
| == SBI_DOMAIN_MEMREGION_M_RWX) { |
| sbi_printf("%s: M-mode only regions cannot have " |
| "RWX permissions\n", __func__); |
| return 0; |
| } |
| |
| /* M-mode only access regions are always locked */ |
| pmp_flags |= PMP_L; |
| |
| if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE) |
| pmp_flags |= PMP_R; |
| if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE) |
| pmp_flags |= PMP_W; |
| if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE) |
| pmp_flags |= PMP_X; |
| } else if (SBI_DOMAIN_MEMREGION_SU_ONLY_ACCESS(reg->flags)) { |
| if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE) |
| pmp_flags |= PMP_R; |
| if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE) |
| pmp_flags |= PMP_W; |
| if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE) |
| pmp_flags |= PMP_X; |
| } |
| |
| return pmp_flags; |
| } |
| |
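| /* |
| * Program a single Smepmp entry for the given region, provided the |
| * region's order and base fit the hart's PMP granularity and |
| * address range. |
| */ |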
| static void sbi_hart_smepmp_set(struct sbi_scratch *scratch, |
| struct sbi_domain *dom, |
| struct sbi_domain_memregion *reg, |
| unsigned int pmp_idx, |
| unsigned int pmp_flags, |
| unsigned int pmp_log2gran, |
| unsigned long pmp_addr_max) |
| { |
| unsigned long pmp_addr = reg->base >> PMP_SHIFT; |
| |
| if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) { |
| pmp_set(pmp_idx, pmp_flags, reg->base, reg->order); |
| } else { |
| sbi_printf("Cannot configure PMP for domain %s because" |
| " memory region address 0x%lx or order %lu " |
| "is not in range.\n", dom->name, reg->base, |
| reg->order); |
| } |
| } |
| |
| static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch, |
| unsigned int pmp_count, |
| unsigned int pmp_log2gran, |
| unsigned long pmp_addr_max) |
| { |
| struct sbi_domain_memregion *reg; |
| struct sbi_domain *dom = sbi_domain_thishart_ptr(); |
| unsigned int pmp_idx, pmp_flags; |
| |
| /* |
| * Set the RLB bit so that we can write to PMP entries without |
| * enforcement even if some entries are locked. |
| */ |
| csr_set(CSR_MSECCFG, MSECCFG_RLB); |
| |
| /* Disable the reserved entry */ |
| pmp_disable(SBI_SMEPMP_RESV_ENTRY); |
| |
| /* Program M-only regions when MML is not set. */ |
| pmp_idx = 0; |
| sbi_domain_for_each_memregion(dom, reg) { |
| /* Skip reserved entry */ |
| if (pmp_idx == SBI_SMEPMP_RESV_ENTRY) |
| pmp_idx++; |
| if (pmp_count <= pmp_idx) |
| break; |
| |
| /* Skip shared and SU-only regions */ |
| if (!SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) { |
| pmp_idx++; |
| continue; |
| } |
| |
| pmp_flags = sbi_hart_get_smepmp_flags(scratch, dom, reg); |
| if (!pmp_flags) |
| return 0; |
| |
| sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags, |
| pmp_log2gran, pmp_addr_max); |
| } |
| |
| /* Set the MML to enforce new encoding */ |
| csr_set(CSR_MSECCFG, MSECCFG_MML); |
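| /* |
| * With MML set, M-mode may only execute from regions covered by a |
| * matching locked rule with execute permission, which is why the |
| * M-mode-only regions were programmed before this point. |
| */ |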
| |
| /* Program shared and SU-only regions */ |
| pmp_idx = 0; |
| sbi_domain_for_each_memregion(dom, reg) { |
| /* Skip reserved entry */ |
| if (pmp_idx == SBI_SMEPMP_RESV_ENTRY) |
| pmp_idx++; |
| if (pmp_count <= pmp_idx) |
| break; |
| |
| /* Skip M-only regions */ |
| if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) { |
| pmp_idx++; |
| continue; |
| } |
| |
| pmp_flags = sbi_hart_get_smepmp_flags(scratch, dom, reg); |
| if (!pmp_flags) |
| return 0; |
| |
| sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags, |
| pmp_log2gran, pmp_addr_max); |
| } |
| |
| /* |
| * All entries are programmed. |
| * Keep the RLB bit set so that the reserved entry can still be |
| * programmed dynamically (see sbi_hart_map_saddr()). |
| */ |
| |
| return 0; |
| } |
| |
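| /* Fallback PMP programming for harts without the Smepmp extension */ |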
| static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch, |
| unsigned int pmp_count, |
| unsigned int pmp_log2gran, |
| unsigned long pmp_addr_max) |
| { |
| struct sbi_domain_memregion *reg; |
| struct sbi_domain *dom = sbi_domain_thishart_ptr(); |
| unsigned int pmp_idx = 0; |
| unsigned int pmp_flags; |
| unsigned long pmp_addr; |
| |
| sbi_domain_for_each_memregion(dom, reg) { |
| if (pmp_count <= pmp_idx) |
| break; |
| |
| pmp_flags = 0; |
| |
| /* |
| * If permissions are to be enforced for all modes on |
| * this region, the lock bit should be set. |
| */ |
| if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS) |
| pmp_flags |= PMP_L; |
| |
| if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE) |
| pmp_flags |= PMP_R; |
| if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE) |
| pmp_flags |= PMP_W; |
| if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE) |
| pmp_flags |= PMP_X; |
| |
| pmp_addr = reg->base >> PMP_SHIFT; |
| if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) { |
| pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order); |
| } else { |
| sbi_printf("Cannot configure PMP for domain %s because" |
| " memory region address 0x%lx or order %lu " |
| "is not in range.\n", dom->name, reg->base, |
| reg->order); |
| } |
| } |
| |
| return 0; |
| } |
| |
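| /* |
| * Temporarily map an S-mode shared memory range for M-mode access |
| * using the reserved Smepmp entry. Callers are expected to pair this |
| * with sbi_hart_unmap_saddr() once the access is done, e.g.: |
| * |
| *   sbi_hart_map_saddr(base, len); |
| *   ... access the shared buffer ... |
| *   sbi_hart_unmap_saddr(); |
| */ |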
| int sbi_hart_map_saddr(unsigned long addr, unsigned long size) |
| { |
| /* shared R/W access for M and S/U mode */ |
| unsigned int pmp_flags = (PMP_W | PMP_X); |
| unsigned long order, base = 0; |
| struct sbi_scratch *scratch = sbi_scratch_thishart_ptr(); |
| |
| /* If Smepmp is not supported, no special mapping is required */ |
| if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) |
| return SBI_OK; |
| |
| if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY)) |
| return SBI_ENOSPC; |
| |
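| /* |
| * Find the smallest naturally-aligned power-of-2 region that fully |
| * covers [addr, addr + size) and can be described by a single PMP |
| * entry. |
| */ |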
| for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size)); |
| order <= __riscv_xlen; order++) { |
| if (order < __riscv_xlen) { |
| base = addr & ~((1UL << order) - 1UL); |
| if ((base <= addr) && |
| (addr < (base + (1UL << order))) && |
| (base <= (addr + size - 1UL)) && |
| ((addr + size - 1UL) < (base + (1UL << order)))) |
| break; |
| } else { |
| return SBI_EFAIL; |
| } |
| } |
| |
| pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order); |
| |
| return SBI_OK; |
| } |
| |
| int sbi_hart_unmap_saddr(void) |
| { |
| struct sbi_scratch *scratch = sbi_scratch_thishart_ptr(); |
| |
| if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) |
| return SBI_OK; |
| |
| return pmp_disable(SBI_SMEPMP_RESV_ENTRY); |
| } |
| |
| int sbi_hart_pmp_configure(struct sbi_scratch *scratch) |
| { |
| int rc; |
| unsigned int pmp_bits, pmp_log2gran; |
| unsigned int pmp_count = sbi_hart_pmp_count(scratch); |
| unsigned long pmp_addr_max; |
| |
| if (!pmp_count) |
| return 0; |
| |
| pmp_log2gran = sbi_hart_pmp_log2gran(scratch); |
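| /* |
| * pmp_addr_max is the largest pmpaddr value (physical address >> 2) |
| * expressible with the detected number of PMP address bits. |
| */ |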
| pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1; |
| pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1); |
| |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) |
| rc = sbi_hart_smepmp_configure(scratch, pmp_count, |
| pmp_log2gran, pmp_addr_max); |
| else |
| rc = sbi_hart_oldpmp_configure(scratch, pmp_count, |
| pmp_log2gran, pmp_addr_max); |
| |
| /* |
| * As per section 3.7.2 of privileged specification v1.12, |
| * virtual address translations can be speculatively performed |
| * (even before an actual access). These, along with PMP translations, |
| * can be cached. This can pose a problem with CPU hotplug |
| * and non-retentive suspend scenarios because PMP states are |
| * not preserved. |
| * It is advisable to flush the caching structures under such |
| * conditions. |
| */ |
| if (misa_extension('S')) { |
| __asm__ __volatile__("sfence.vma"); |
| |
| /* |
| * If hypervisor mode is supported, flush caching |
| * structures in guest mode too. |
| */ |
| if (misa_extension('H')) |
| __sbi_hfence_gvma_all(); |
| } |
| |
| return rc; |
| } |
| |
| int sbi_hart_priv_version(struct sbi_scratch *scratch) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| return hfeatures->priv_version; |
| } |
| |
| void sbi_hart_get_priv_version_str(struct sbi_scratch *scratch, |
| char *version_str, int nvstr) |
| { |
| char *temp; |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| switch (hfeatures->priv_version) { |
| case SBI_HART_PRIV_VER_1_10: |
| temp = "v1.10"; |
| break; |
| case SBI_HART_PRIV_VER_1_11: |
| temp = "v1.11"; |
| break; |
| case SBI_HART_PRIV_VER_1_12: |
| temp = "v1.12"; |
| break; |
| default: |
| temp = "unknown"; |
| break; |
| } |
| |
| sbi_snprintf(version_str, nvstr, "%s", temp); |
| } |
| |
| static inline void __sbi_hart_update_extension( |
| struct sbi_hart_features *hfeatures, |
| enum sbi_hart_extensions ext, |
| bool enable) |
| { |
| if (enable) |
| __set_bit(ext, hfeatures->extensions); |
| else |
| __clear_bit(ext, hfeatures->extensions); |
| } |
| |
| /** |
| * Enable/Disable a particular hart extension |
| * |
| * @param scratch pointer to the HART scratch space |
| * @param ext the extension number to update |
| * @param enable new state of hart extension |
| */ |
| void sbi_hart_update_extension(struct sbi_scratch *scratch, |
| enum sbi_hart_extensions ext, |
| bool enable) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| __sbi_hart_update_extension(hfeatures, ext, enable); |
| } |
| |
| /** |
| * Check whether a particular hart extension is available |
| * |
| * @param scratch pointer to the HART scratch space |
| * @param ext the extension number to check |
| * @returns true (available) or false (not available) |
| */ |
| bool sbi_hart_has_extension(struct sbi_scratch *scratch, |
| enum sbi_hart_extensions ext) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| |
| if (__test_bit(ext, hfeatures->extensions)) |
| return true; |
| else |
| return false; |
| } |
| |
| #define __SBI_HART_EXT_DATA(_name, _id) { \ |
| .name = #_name, \ |
| .id = _id, \ |
| } |
| |
| const struct sbi_hart_ext_data sbi_hart_ext[] = { |
| __SBI_HART_EXT_DATA(smaia, SBI_HART_EXT_SMAIA), |
| __SBI_HART_EXT_DATA(smepmp, SBI_HART_EXT_SMEPMP), |
| __SBI_HART_EXT_DATA(smstateen, SBI_HART_EXT_SMSTATEEN), |
| __SBI_HART_EXT_DATA(sscofpmf, SBI_HART_EXT_SSCOFPMF), |
| __SBI_HART_EXT_DATA(sstc, SBI_HART_EXT_SSTC), |
| __SBI_HART_EXT_DATA(zicntr, SBI_HART_EXT_ZICNTR), |
| __SBI_HART_EXT_DATA(zihpm, SBI_HART_EXT_ZIHPM), |
| __SBI_HART_EXT_DATA(zkr, SBI_HART_EXT_ZKR), |
| __SBI_HART_EXT_DATA(smcntrpmf, SBI_HART_EXT_SMCNTRPMF), |
| __SBI_HART_EXT_DATA(xandespmu, SBI_HART_EXT_XANDESPMU), |
| __SBI_HART_EXT_DATA(zicboz, SBI_HART_EXT_ZICBOZ), |
| __SBI_HART_EXT_DATA(zicbom, SBI_HART_EXT_ZICBOM), |
| __SBI_HART_EXT_DATA(svpbmt, SBI_HART_EXT_SVPBMT), |
| __SBI_HART_EXT_DATA(sdtrig, SBI_HART_EXT_SDTRIG), |
| __SBI_HART_EXT_DATA(smcsrind, SBI_HART_EXT_SMCSRIND), |
| __SBI_HART_EXT_DATA(smcdeleg, SBI_HART_EXT_SMCDELEG), |
| __SBI_HART_EXT_DATA(sscsrind, SBI_HART_EXT_SSCSRIND), |
| __SBI_HART_EXT_DATA(ssccfg, SBI_HART_EXT_SSCCFG), |
| __SBI_HART_EXT_DATA(svade, SBI_HART_EXT_SVADE), |
| __SBI_HART_EXT_DATA(svadu, SBI_HART_EXT_SVADU), |
| }; |
| |
| _Static_assert(SBI_HART_EXT_MAX == array_size(sbi_hart_ext), |
| "sbi_hart_ext[]: wrong number of entries"); |
| |
| /** |
| * Get the hart extensions in string format |
| * |
| * @param scratch pointer to the HART scratch space |
| * @param extensions_str pointer to a char array where the extensions string |
| * will be updated |
| * @param nestr length of extensions_str. The extensions string will be |
| * truncated if nestr is not long enough. |
| */ |
| void sbi_hart_get_extensions_str(struct sbi_scratch *scratch, |
| char *extensions_str, int nestr) |
| { |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| int offset = 0, ext = 0; |
| |
| if (!extensions_str || nestr <= 0) |
| return; |
| sbi_memset(extensions_str, 0, nestr); |
| |
| for_each_set_bit(ext, hfeatures->extensions, SBI_HART_EXT_MAX) { |
| sbi_snprintf(extensions_str + offset, |
| nestr - offset, |
| "%s,", sbi_hart_ext[ext].name); |
| offset = offset + sbi_strlen(sbi_hart_ext[ext].name) + 1; |
| } |
| |
| if (offset) |
| extensions_str[offset - 1] = '\0'; |
| else |
| sbi_strncpy(extensions_str, "none", nestr); |
| } |
| |
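| /* |
| * Probe PMPADDR0 by writing the full address mask and reading it back; |
| * the bits that read back as one reveal the PMP granularity and address |
| * range. PMPCFG0 is cleared first, and a trap on any of these accesses |
| * means the hart is treated as having no PMP. |
| */ |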
| static unsigned long hart_pmp_get_allowed_addr(void) |
| { |
| unsigned long val = 0; |
| struct sbi_trap_info trap = {0}; |
| |
| csr_write_allowed(CSR_PMPCFG0, (ulong)&trap, 0); |
| if (trap.cause) |
| return 0; |
| |
| csr_write_allowed(CSR_PMPADDR0, (ulong)&trap, PMP_ADDR_MASK); |
| if (!trap.cause) { |
| val = csr_read_allowed(CSR_PMPADDR0, (ulong)&trap); |
| if (trap.cause) |
| val = 0; |
| } |
| |
| return val; |
| } |
| |
| static int hart_mhpm_get_allowed_bits(void) |
| { |
| unsigned long val = ~(0UL); |
| struct sbi_trap_info trap = {0}; |
| int num_bits = 0; |
| |
| /** |
| * It is assumed that platforms will implement the same number of bits for |
| * all the performance counters including mcycle/minstret. |
| */ |
| csr_write_allowed(CSR_MHPMCOUNTER3, (ulong)&trap, val); |
| if (!trap.cause) { |
| val = csr_read_allowed(CSR_MHPMCOUNTER3, (ulong)&trap); |
| if (trap.cause) |
| return 0; |
| } |
| num_bits = sbi_fls(val) + 1; |
| #if __riscv_xlen == 32 |
| csr_write_allowed(CSR_MHPMCOUNTER3H, (ulong)&trap, val); |
| if (!trap.cause) { |
| val = csr_read_allowed(CSR_MHPMCOUNTER3H, (ulong)&trap); |
| if (trap.cause) |
| return num_bits; |
| } |
| num_bits += sbi_fls(val) + 1; |
| |
| #endif |
| |
| return num_bits; |
| } |
| |
| static int hart_detect_features(struct sbi_scratch *scratch) |
| { |
| struct sbi_trap_info trap = {0}; |
| struct sbi_hart_features *hfeatures = |
| sbi_scratch_offset_ptr(scratch, hart_features_offset); |
| unsigned long val, oldval; |
| bool has_zicntr = false; |
| int rc; |
| |
| /* If hart features already detected then do nothing */ |
| if (hfeatures->detected) |
| return 0; |
| |
| /* Clear hart features */ |
| sbi_memset(hfeatures->extensions, 0, sizeof(hfeatures->extensions)); |
| hfeatures->pmp_count = 0; |
| hfeatures->mhpm_mask = 0; |
| hfeatures->priv_version = SBI_HART_PRIV_VER_UNKNOWN; |
| |
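| /* |
| * Probe a counter CSR by writing a non-zero value and reading it |
| * back; if the access does not trap and the value sticks, mark the |
| * corresponding counter as present in the given mask. |
| */ |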
| #define __check_hpm_csr(__csr, __mask) \ |
| oldval = csr_read_allowed(__csr, (ulong)&trap); \ |
| if (!trap.cause) { \ |
| csr_write_allowed(__csr, (ulong)&trap, 1UL); \ |
| if (!trap.cause && csr_swap(__csr, oldval) == 1UL) { \ |
| (hfeatures->__mask) |= 1 << (__csr - CSR_MCYCLE); \ |
| } \ |
| } |
| |
| #define __check_hpm_csr_2(__csr, __mask) \ |
| __check_hpm_csr(__csr + 0, __mask) \ |
| __check_hpm_csr(__csr + 1, __mask) |
| #define __check_hpm_csr_4(__csr, __mask) \ |
| __check_hpm_csr_2(__csr + 0, __mask) \ |
| __check_hpm_csr_2(__csr + 2, __mask) |
| #define __check_hpm_csr_8(__csr, __mask) \ |
| __check_hpm_csr_4(__csr + 0, __mask) \ |
| __check_hpm_csr_4(__csr + 4, __mask) |
| #define __check_hpm_csr_16(__csr, __mask) \ |
| __check_hpm_csr_8(__csr + 0, __mask) \ |
| __check_hpm_csr_8(__csr + 8, __mask) |
| |
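| /* |
| * Same probing technique for a run of CSRs: count how many |
| * consecutive CSRs starting at __csr are implemented and bail out |
| * to __skip at the first one that traps or does not hold a value. |
| */ |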
| #define __check_csr(__csr, __rdonly, __wrval, __field, __skip) \ |
| oldval = csr_read_allowed(__csr, (ulong)&trap); \ |
| if (!trap.cause) { \ |
| if (__rdonly) { \ |
| (hfeatures->__field)++; \ |
| } else { \ |
| csr_write_allowed(__csr, (ulong)&trap, __wrval);\ |
| if (!trap.cause) { \ |
| if (csr_swap(__csr, oldval) == __wrval) \ |
| (hfeatures->__field)++; \ |
| else \ |
| goto __skip; \ |
| } else { \ |
| goto __skip; \ |
| } \ |
| } \ |
| } else { \ |
| goto __skip; \ |
| } |
| #define __check_csr_2(__csr, __rdonly, __wrval, __field, __skip) \ |
| __check_csr(__csr + 0, __rdonly, __wrval, __field, __skip) \ |
| __check_csr(__csr + 1, __rdonly, __wrval, __field, __skip) |
| #define __check_csr_4(__csr, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_2(__csr + 0, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_2(__csr + 2, __rdonly, __wrval, __field, __skip) |
| #define __check_csr_8(__csr, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_4(__csr + 0, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_4(__csr + 4, __rdonly, __wrval, __field, __skip) |
| #define __check_csr_16(__csr, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_8(__csr + 0, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_8(__csr + 8, __rdonly, __wrval, __field, __skip) |
| #define __check_csr_32(__csr, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_16(__csr + 0, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_16(__csr + 16, __rdonly, __wrval, __field, __skip) |
| #define __check_csr_64(__csr, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_32(__csr + 0, __rdonly, __wrval, __field, __skip) \ |
| __check_csr_32(__csr + 32, __rdonly, __wrval, __field, __skip) |
| |
| /** |
| * Detect the allowed address bits & granularity. At least PMPADDR0 |
| * should be implemented. |
| */ |
| val = hart_pmp_get_allowed_addr(); |
| if (val) { |
| hfeatures->pmp_log2gran = sbi_ffs(val) + 2; |
| hfeatures->pmp_addr_bits = sbi_fls(val) + 1; |
| /* Detect the number of implemented PMP regions */ |
| __check_csr_64(CSR_PMPADDR0, 0, val, pmp_count, __pmp_skip); |
| } |
| __pmp_skip: |
| /* Detect number of MHPM counters */ |
| __check_hpm_csr(CSR_MHPMCOUNTER3, mhpm_mask); |
| hfeatures->mhpm_bits = hart_mhpm_get_allowed_bits(); |
| __check_hpm_csr_4(CSR_MHPMCOUNTER4, mhpm_mask); |
| __check_hpm_csr_8(CSR_MHPMCOUNTER8, mhpm_mask); |
| __check_hpm_csr_16(CSR_MHPMCOUNTER16, mhpm_mask); |
| |
| /** |
| * No need to check the MHPMCOUNTER[n]H CSRs on RV32 as they are |
| * expected to be implemented whenever MHPMCOUNTER[n] is implemented. |
| */ |
| #undef __check_csr_64 |
| #undef __check_csr_32 |
| #undef __check_csr_16 |
| #undef __check_csr_8 |
| #undef __check_csr_4 |
| #undef __check_csr_2 |
| #undef __check_csr |
| |
| |
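| /* |
| * Infer the privilege spec version from which CSRs exist: mcounteren |
| * implies at least v1.10, mcountinhibit v1.11 and menvcfg v1.12. |
| */ |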
| #define __check_priv(__csr, __base_priv, __priv) \ |
| val = csr_read_allowed(__csr, (ulong)&trap); \ |
| if (!trap.cause && (hfeatures->priv_version >= __base_priv)) { \ |
| hfeatures->priv_version = __priv; \ |
| } |
| |
| /* Detect if hart supports Priv v1.10 */ |
| __check_priv(CSR_MCOUNTEREN, |
| SBI_HART_PRIV_VER_UNKNOWN, SBI_HART_PRIV_VER_1_10); |
| /* Detect if hart supports Priv v1.11 */ |
| __check_priv(CSR_MCOUNTINHIBIT, |
| SBI_HART_PRIV_VER_1_10, SBI_HART_PRIV_VER_1_11); |
| /* Detect if hart supports Priv v1.12 */ |
| __check_priv(CSR_MENVCFG, |
| SBI_HART_PRIV_VER_1_11, SBI_HART_PRIV_VER_1_12); |
| |
| #undef __check_priv |
| |
| #define __check_ext_csr(__base_priv, __csr, __ext) \ |
| if (hfeatures->priv_version >= __base_priv) { \ |
| csr_read_allowed(__csr, (ulong)&trap); \ |
| if (!trap.cause) \ |
| __sbi_hart_update_extension(hfeatures, \ |
| __ext, true); \ |
| } |
| |
| /* Counter overflow/filtering is not useful without mcountinhibit */ |
| /* Detect if hart supports sscofpmf */ |
| __check_ext_csr(SBI_HART_PRIV_VER_1_11, |
| CSR_SCOUNTOVF, SBI_HART_EXT_SSCOFPMF); |
| /* Detect if hart supports time CSR */ |
| __check_ext_csr(SBI_HART_PRIV_VER_UNKNOWN, |
| CSR_TIME, SBI_HART_EXT_ZICNTR); |
| /* Detect if hart has AIA local interrupt CSRs */ |
| __check_ext_csr(SBI_HART_PRIV_VER_UNKNOWN, |
| CSR_MTOPI, SBI_HART_EXT_SMAIA); |
| /* Detect if hart supports stimecmp CSR (Sstc extension) */ |
| __check_ext_csr(SBI_HART_PRIV_VER_1_12, |
| CSR_STIMECMP, SBI_HART_EXT_SSTC); |
| /* Detect if hart supports mstateen CSRs */ |
| __check_ext_csr(SBI_HART_PRIV_VER_1_12, |
| CSR_MSTATEEN0, SBI_HART_EXT_SMSTATEEN); |
| /* Detect if hart supports smcntrpmf */ |
| __check_ext_csr(SBI_HART_PRIV_VER_1_12, |
| CSR_MCYCLECFG, SBI_HART_EXT_SMCNTRPMF); |
| /* Detect if hart supports sdtrig (debug triggers) */ |
| __check_ext_csr(SBI_HART_PRIV_VER_UNKNOWN, |
| CSR_TSELECT, SBI_HART_EXT_SDTRIG); |
| |
| #undef __check_ext_csr |
| |
| /* Save trap based detection of Zicntr */ |
| has_zicntr = sbi_hart_has_extension(scratch, SBI_HART_EXT_ZICNTR); |
| |
| /* Let platform populate extensions */ |
| rc = sbi_platform_extensions_init(sbi_platform_thishart_ptr(), |
| hfeatures); |
| if (rc) |
| return rc; |
| |
| /* Zicntr should only be detected using traps */ |
| __sbi_hart_update_extension(hfeatures, SBI_HART_EXT_ZICNTR, |
| has_zicntr); |
| |
| /* Extensions implied by other extensions and features */ |
| if (hfeatures->mhpm_mask) |
| __sbi_hart_update_extension(hfeatures, |
| SBI_HART_EXT_ZIHPM, true); |
| |
| /* Mark hart feature detection done */ |
| hfeatures->detected = true; |
| |
| /* |
| * On platforms with Smepmp, the previous booting stage must |
| * enter OpenSBI with mseccfg.MML == 0. This allows OpenSBI |
| * to configure its own M-mode only regions without depending |
| * on the previous booting stage. |
| */ |
| if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP) && |
| (csr_read(CSR_MSECCFG) & MSECCFG_MML)) |
| return SBI_EILL; |
| |
| return 0; |
| } |
| |
| int sbi_hart_reinit(struct sbi_scratch *scratch) |
| { |
| int rc; |
| |
| mstatus_init(scratch); |
| |
| rc = fp_init(scratch); |
| if (rc) |
| return rc; |
| |
| rc = delegate_traps(scratch); |
| if (rc) |
| return rc; |
| |
| return 0; |
| } |
| |
| int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot) |
| { |
| int rc; |
| |
| /* |
| * Clear mip CSR before proceeding with init to avoid any spurious |
| * external interrupts in S-mode. |
| */ |
| csr_write(CSR_MIP, 0); |
| |
| if (cold_boot) { |
| if (misa_extension('H')) |
| sbi_hart_expected_trap = &__sbi_expected_trap_hext; |
| |
| hart_features_offset = sbi_scratch_alloc_offset( |
| sizeof(struct sbi_hart_features)); |
| if (!hart_features_offset) |
| return SBI_ENOMEM; |
| } |
| |
| rc = hart_detect_features(scratch); |
| if (rc) |
| return rc; |
| |
| return sbi_hart_reinit(scratch); |
| } |
| |
| void __attribute__((noreturn)) sbi_hart_hang(void) |
| { |
| while (1) |
| wfi(); |
| __builtin_unreachable(); |
| } |
| |
| void __attribute__((noreturn)) |
| sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1, |
| unsigned long next_addr, unsigned long next_mode, |
| bool next_virt) |
| { |
| #if __riscv_xlen == 32 |
| unsigned long val, valH; |
| #else |
| unsigned long val; |
| #endif |
| |
| switch (next_mode) { |
| case PRV_M: |
| break; |
| case PRV_S: |
| if (!misa_extension('S')) |
| sbi_hart_hang(); |
| break; |
| case PRV_U: |
| if (!misa_extension('U')) |
| sbi_hart_hang(); |
| break; |
| default: |
| sbi_hart_hang(); |
| } |
| |
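| /* |
| * Prepare mstatus for the mret below: MPP selects the privilege |
| * mode to switch to and MPIE=0 makes the next stage start with |
| * interrupts disabled. |
| */ |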
| val = csr_read(CSR_MSTATUS); |
| val = INSERT_FIELD(val, MSTATUS_MPP, next_mode); |
| val = INSERT_FIELD(val, MSTATUS_MPIE, 0); |
| #if __riscv_xlen == 32 |
| if (misa_extension('H')) { |
| valH = csr_read(CSR_MSTATUSH); |
| valH = INSERT_FIELD(valH, MSTATUSH_MPV, next_virt); |
| csr_write(CSR_MSTATUSH, valH); |
| } |
| #else |
| if (misa_extension('H')) |
| val = INSERT_FIELD(val, MSTATUS_MPV, next_virt); |
| #endif |
| csr_write(CSR_MSTATUS, val); |
| csr_write(CSR_MEPC, next_addr); |
| |
| if (next_mode == PRV_S) { |
| if (next_virt) { |
| csr_write(CSR_VSTVEC, next_addr); |
| csr_write(CSR_VSSCRATCH, 0); |
| csr_write(CSR_VSIE, 0); |
| csr_write(CSR_VSATP, 0); |
| } else { |
| csr_write(CSR_STVEC, next_addr); |
| csr_write(CSR_SSCRATCH, 0); |
| csr_write(CSR_SIE, 0); |
| csr_write(CSR_SATP, 0); |
| } |
| } else if (next_mode == PRV_U) { |
| if (misa_extension('N')) { |
| csr_write(CSR_UTVEC, next_addr); |
| csr_write(CSR_USCRATCH, 0); |
| csr_write(CSR_UIE, 0); |
| } |
| } |
| |
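| /* Pass arg0/arg1 in a0/a1 across the mret to the next booting stage */ |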
| register unsigned long a0 asm("a0") = arg0; |
| register unsigned long a1 asm("a1") = arg1; |
| __asm__ __volatile__("mret" : : "r"(a0), "r"(a1)); |
| __builtin_unreachable(); |
| } |