/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection interface
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_PMP_H
#define RISCV_PMP_H

#include "cpu.h"

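/*
 * Permission and lock bits held in each 8-bit pmpcfg field. Bits 3..4 encode
 * the address-matching mode (see pmp_am_t below) and are not part of this
 * enum.
 */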
typedef enum {
    PMP_READ  = 1 << 0,
    PMP_WRITE = 1 << 1,
    PMP_EXEC  = 1 << 2,
    PMP_LOCK  = 1 << 7
} pmp_priv_t;

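/* Address-matching modes, i.e. the A field (bits 3..4) of a pmpcfg entry */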
typedef enum {
    PMP_AMATCH_OFF,  /* Null (off)                            */
    PMP_AMATCH_TOR,  /* Top of Range                          */
    PMP_AMATCH_NA4,  /* Naturally aligned four-byte region    */
    PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
} pmp_am_t;

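/*
 * Fields of the mseccfg CSR: MML, MMWP and RLB come from the Smepmp (ePMP)
 * extension, USEED and SSEED from the Zkr entropy-source extension.
 */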
typedef enum {
    MSECCFG_MML   = 1 << 0,
    MSECCFG_MMWP  = 1 << 1,
    MSECCFG_RLB   = 1 << 2,
    MSECCFG_USEED = 1 << 8,
    MSECCFG_SSEED = 1 << 9
} mseccfg_field_t;

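/* Raw register state of one PMP entry: pmpaddrN plus its pmpcfg byte */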
typedef struct {
    target_ulong addr_reg;
    uint8_t  cfg_reg;
} pmp_entry_t;

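/* Decoded start/end physical address range covered by a rule */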
typedef struct {
    target_ulong sa;
    target_ulong ea;
} pmp_addr_t;

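/* Per-hart PMP state: raw entries, decoded ranges and active rule count */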
typedef struct {
    pmp_entry_t pmp[MAX_RISCV_PMPS];
    pmp_addr_t  addr[MAX_RISCV_PMPS];
    uint32_t num_rules;
} pmp_table_t;

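/* Accessors for the pmpcfgN, mseccfg and pmpaddrN CSRs, called from csr.c */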
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val);
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);

void mseccfg_csr_write(CPURISCVState *env, target_ulong val);
target_ulong mseccfg_csr_read(CPURISCVState *env);

void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
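/*
 * Check whether an access of 'size' bytes at physical address 'addr' with the
 * requested privileges is allowed by the PMP rules for a hart running in
 * privilege level 'mode'; the privileges granted for the matching region are
 * reported through *allowed_privs.
 *
 * Illustrative caller (a sketch only; the real user is the physical address
 * translation path in target/riscv/cpu_helper.c and may differ in detail):
 *
 *     pmp_priv_t allowed_privs;
 *     if (!pmp_hart_has_privs(env, paddr, size, 1 << access_type,
 *                             &allowed_privs, mode)) {
 *         return TRANSLATE_PMP_FAIL;
 *     }
 */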
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode);
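/*
 * Helpers for the softmmu TLB fill path and for keeping the decoded rule
 * table (pmp_table_t) in sync with the raw CSR state.
 */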
bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
    target_ulong *tlb_size);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv);

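/* Convenience tests for individual mseccfg fields in the CPU state */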
#define MSECCFG_MML_ISSET(env)  get_field(env->mseccfg, MSECCFG_MML)
#define MSECCFG_MMWP_ISSET(env) get_field(env->mseccfg, MSECCFG_MMWP)
#define MSECCFG_RLB_ISSET(env)  get_field(env->mseccfg, MSECCFG_RLB)

#endif