/*
 * ARM GICv3 support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"
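/*
 * Register layouts below are described either as plain #define bit masks
 * or with the FIELD() macro from "hw/registerfields.h", which generates
 * R_<REG>_<FIELD>_SHIFT, _LENGTH and _MASK constants. A purely
 * illustrative use of those generated constants (the variable names here
 * are just examples, not part of this header) looks like:
 *
 *   uint64_t idbits = FIELD_EX64(propbaser, GICR_PROPBASER, IDBITS);
 *   propbaser = FIELD_DP64(propbaser, GICR_PROPBASER, IDBITS, 0xf);
 */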

/* Distributor registers, as offsets from the distributor base address */
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ISACTIVER 0x0300
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ITARGETSR 0x0800
#define GICD_ICFGR 0x0C00
#define GICD_IGRPMODR 0x0D00
#define GICD_NSACR 0x0E00
#define GICD_SGIR 0x0F00
#define GICD_CPENDSGIR 0x0F10
#define GICD_SPENDSGIR 0x0F20
#define GICD_IROUTER 0x6000
#define GICD_IDREGS 0xFFD0

/* GICD_CTLR fields */
#define GICD_CTLR_EN_GRP0 (1U << 0)
#define GICD_CTLR_EN_GRP1NS (1U << 1) /* GICv3 5.3.20 */
#define GICD_CTLR_EN_GRP1S (1U << 2)
#define GICD_CTLR_EN_GRP1_ALL (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
#define GICD_CTLR_ARE (1U << 4)
#define GICD_CTLR_ARE_S (1U << 4)
#define GICD_CTLR_ARE_NS (1U << 5)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_E1NWF (1U << 7)
#define GICD_CTLR_RWP (1U << 31)

#define GICD_TYPER_LPIS_SHIFT 17

/* 16 bits EventId */
#define GICD_TYPER_IDBITS 0xf

/*
 * Redistributor frame offsets from RD_base
 */
#define GICR_SGI_OFFSET 0x10000
#define GICR_VLPI_OFFSET 0x20000

/*
 * Redistributor registers, offsets from RD_base
 */
#define GICR_CTLR 0x0000
#define GICR_IIDR 0x0004
#define GICR_TYPER 0x0008
#define GICR_STATUSR 0x0010
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
#define GICR_IDREGS 0xFFD0

/* SGI and PPI Redistributor registers, offsets from RD_base */
#define GICR_IGROUPR0 (GICR_SGI_OFFSET + 0x0080)
#define GICR_ISENABLER0 (GICR_SGI_OFFSET + 0x0100)
#define GICR_ICENABLER0 (GICR_SGI_OFFSET + 0x0180)
#define GICR_ISPENDR0 (GICR_SGI_OFFSET + 0x0200)
#define GICR_ICPENDR0 (GICR_SGI_OFFSET + 0x0280)
#define GICR_ISACTIVER0 (GICR_SGI_OFFSET + 0x0300)
#define GICR_ICACTIVER0 (GICR_SGI_OFFSET + 0x0380)
#define GICR_IPRIORITYR (GICR_SGI_OFFSET + 0x0400)
#define GICR_ICFGR0 (GICR_SGI_OFFSET + 0x0C00)
#define GICR_ICFGR1 (GICR_SGI_OFFSET + 0x0C04)
#define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00)
#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)

/* VLPI redistributor registers, offsets from VLPI_base */
#define GICR_VPROPBASER (GICR_VLPI_OFFSET + 0x70)
#define GICR_VPENDBASER (GICR_VLPI_OFFSET + 0x78)

#define GICR_CTLR_ENABLE_LPIS (1U << 0)
#define GICR_CTLR_CES (1U << 1)
#define GICR_CTLR_RWP (1U << 3)
#define GICR_CTLR_DPG0 (1U << 24)
#define GICR_CTLR_DPG1NS (1U << 25)
#define GICR_CTLR_DPG1S (1U << 26)
#define GICR_CTLR_UWP (1U << 31)

#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_DIRECTLPI (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
#define GICR_TYPER_DPGS (1U << 5)
#define GICR_TYPER_PROCNUM (0xFFFFU << 8)
#define GICR_TYPER_COMMONLPIAFF (0x3 << 24)
#define GICR_TYPER_AFFINITYVALUE (0xFFFFFFFFULL << 32)

#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)

FIELD(GICR_PROPBASER, IDBITS, 0, 5)
FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_PENDBASER, PTZ, 62, 1)

#define GICR_PROPBASER_IDBITS_THRESHOLD 0xd
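/*
 * LPI INTIDs start at 8192, and the IDBITS field encodes "number of ID
 * bits minus one", so a GICR_PROPBASER.IDbits value below 0xd describes
 * fewer than 14 ID bits and therefore cannot cover any LPI at all;
 * values below this threshold can be treated as "no LPIs configured".
 */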

/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */
FIELD(GICR_VPROPBASER, IDBITS, 0, 5)
FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPROPBASER, PHYADDR, 12, 40)
FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPENDBASER, PHYADDR, 16, 36)
FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_VPENDBASER, DIRTY, 60, 1)
FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1)
FIELD(GICR_VPENDBASER, IDAI, 62, 1)
FIELD(GICR_VPENDBASER, VALID, 63, 1)

#define ICC_CTLR_EL1_CBPR (1U << 0)
#define ICC_CTLR_EL1_EOIMODE (1U << 1)
#define ICC_CTLR_EL1_PMHE (1U << 6)
#define ICC_CTLR_EL1_PRIBITS_SHIFT 8
#define ICC_CTLR_EL1_PRIBITS_MASK (7U << ICC_CTLR_EL1_PRIBITS_SHIFT)
#define ICC_CTLR_EL1_IDBITS_SHIFT 11
#define ICC_CTLR_EL1_SEIS (1U << 14)
#define ICC_CTLR_EL1_A3V (1U << 15)

#define ICC_PMR_PRIORITY_MASK 0xff
#define ICC_BPR_BINARYPOINT_MASK 0x07
#define ICC_IGRPEN_ENABLE 0x01

#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0)
#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1)
#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2)
#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3)
#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4)
#define ICC_CTLR_EL3_RM (1U << 5)
#define ICC_CTLR_EL3_PMHE (1U << 6)
#define ICC_CTLR_EL3_PRIBITS_SHIFT 8
#define ICC_CTLR_EL3_IDBITS_SHIFT 11
#define ICC_CTLR_EL3_SEIS (1U << 14)
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)

#define ICH_VMCR_EL2_VENG0_SHIFT 0
#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
#define ICH_VMCR_EL2_VENG1_SHIFT 1
#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
#define ICH_VMCR_EL2_VACKCTL (1U << 2)
#define ICH_VMCR_EL2_VFIQEN (1U << 3)
#define ICH_VMCR_EL2_VCBPR_SHIFT 4
#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
#define ICH_VMCR_EL2_VEOIM_SHIFT 9
#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
#define ICH_VMCR_EL2_VBPR1_SHIFT 18
#define ICH_VMCR_EL2_VBPR1_LENGTH 3
#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
#define ICH_VMCR_EL2_VBPR0_SHIFT 21
#define ICH_VMCR_EL2_VBPR0_LENGTH 3
#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
#define ICH_VMCR_EL2_VPMR_SHIFT 24
#define ICH_VMCR_EL2_VPMR_LENGTH 8
#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)

#define ICH_HCR_EL2_EN (1U << 0)
#define ICH_HCR_EL2_UIE (1U << 1)
#define ICH_HCR_EL2_LRENPIE (1U << 2)
#define ICH_HCR_EL2_NPIE (1U << 3)
#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
#define ICH_HCR_EL2_TC (1U << 10)
#define ICH_HCR_EL2_TALL0 (1U << 11)
#define ICH_HCR_EL2_TALL1 (1U << 12)
#define ICH_HCR_EL2_TSEI (1U << 13)
#define ICH_HCR_EL2_TDIR (1U << 14)
#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)

#define ICH_LR_EL2_VINTID_SHIFT 0
#define ICH_LR_EL2_VINTID_LENGTH 32
#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
#define ICH_LR_EL2_PINTID_SHIFT 32
#define ICH_LR_EL2_PINTID_LENGTH 10
#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
/* Note that EOI shares with the top bit of the pINTID field */
#define ICH_LR_EL2_EOI (1ULL << 41)
#define ICH_LR_EL2_PRIORITY_SHIFT 48
#define ICH_LR_EL2_PRIORITY_LENGTH 8
#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
#define ICH_LR_EL2_GROUP (1ULL << 60)
#define ICH_LR_EL2_HW (1ULL << 61)
#define ICH_LR_EL2_STATE_SHIFT 62
#define ICH_LR_EL2_STATE_LENGTH 2
#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
/* values for the state field: */
#define ICH_LR_EL2_STATE_INVALID 0
#define ICH_LR_EL2_STATE_PENDING 1
#define ICH_LR_EL2_STATE_ACTIVE 2
#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
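/*
 * Purely illustrative example of assembling an ICH_LR_EL2 value with the
 * constants above (the particular INTID and priority are arbitrary, not
 * anything this header mandates): a pending group-1 virtual interrupt 3
 * at priority 0x80 would be
 *   3 | ICH_LR_EL2_GROUP | ICH_LR_EL2_STATE_PENDING_BIT |
 *       ((uint64_t)0x80 << ICH_LR_EL2_PRIORITY_SHIFT)
 */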

#define ICH_MISR_EL2_EOI (1U << 0)
#define ICH_MISR_EL2_U (1U << 1)
#define ICH_MISR_EL2_LRENP (1U << 2)
#define ICH_MISR_EL2_NP (1U << 3)
#define ICH_MISR_EL2_VGRP0E (1U << 4)
#define ICH_MISR_EL2_VGRP0D (1U << 5)
#define ICH_MISR_EL2_VGRP1E (1U << 6)
#define ICH_MISR_EL2_VGRP1D (1U << 7)

#define ICH_VTR_EL2_LISTREGS_SHIFT 0
#define ICH_VTR_EL2_TDS (1U << 19)
#define ICH_VTR_EL2_NV4 (1U << 20)
#define ICH_VTR_EL2_A3V (1U << 21)
#define ICH_VTR_EL2_SEIS (1U << 22)
#define ICH_VTR_EL2_IDBITS_SHIFT 23
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29

/* ITS Registers */

FIELD(GITS_BASER, SIZE, 0, 8)
FIELD(GITS_BASER, PAGESIZE, 8, 2)
FIELD(GITS_BASER, SHAREABILITY, 10, 2)
FIELD(GITS_BASER, PHYADDR, 12, 36)
FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
FIELD(GITS_BASER, OUTERCACHE, 53, 3)
FIELD(GITS_BASER, TYPE, 56, 3)
FIELD(GITS_BASER, INNERCACHE, 59, 3)
FIELD(GITS_BASER, INDIRECT, 62, 1)
FIELD(GITS_BASER, VALID, 63, 1)

FIELD(GITS_CBASER, SIZE, 0, 8)
FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
FIELD(GITS_CBASER, PHYADDR, 12, 40)
FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
FIELD(GITS_CBASER, INNERCACHE, 59, 3)
FIELD(GITS_CBASER, VALID, 63, 1)

FIELD(GITS_CREADR, STALLED, 0, 1)
FIELD(GITS_CREADR, OFFSET, 5, 15)

FIELD(GITS_CWRITER, RETRY, 0, 1)
FIELD(GITS_CWRITER, OFFSET, 5, 15)

FIELD(GITS_CTLR, ENABLED, 0, 1)
FIELD(GITS_CTLR, QUIESCENT, 31, 1)

FIELD(GITS_TYPER, PHYSICAL, 0, 1)
FIELD(GITS_TYPER, VIRTUAL, 1, 1)
FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
FIELD(GITS_TYPER, IDBITS, 8, 5)
FIELD(GITS_TYPER, DEVBITS, 13, 5)
FIELD(GITS_TYPER, SEIS, 18, 1)
FIELD(GITS_TYPER, PTA, 19, 1)
FIELD(GITS_TYPER, CIDBITS, 32, 4)
FIELD(GITS_TYPER, CIL, 36, 1)
FIELD(GITS_TYPER, VMOVP, 37, 1)

#define GITS_IDREGS 0xFFD0

#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
                            R_GITS_BASER_TYPE_MASK)

#define GITS_BASER_PAGESIZE_4K 0
#define GITS_BASER_PAGESIZE_16K 1
#define GITS_BASER_PAGESIZE_64K 2

#define GITS_BASER_TYPE_DEVICE 1ULL
#define GITS_BASER_TYPE_VPE 2ULL
#define GITS_BASER_TYPE_COLLECTION 4ULL

#define GITS_PAGE_SIZE_4K 0x1000
#define GITS_PAGE_SIZE_16K 0x4000
#define GITS_PAGE_SIZE_64K 0x10000

#define L1TABLE_ENTRY_SIZE 8

#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK
#define LPI_PRIORITY_MASK 0xfc

#define GITS_CMDQ_ENTRY_WORDS 4
#define GITS_CMDQ_ENTRY_SIZE (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t))

#define CMD_MASK 0xff
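/*
 * Each ITS command occupies one 32-byte slot (four 64-bit doublewords) in
 * the command queue; the command number lives in bits [7:0] of the first
 * doubleword, which is what CMD_MASK extracts.
 */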

/* ITS Commands */
#define GITS_CMD_MOVI 0x01
#define GITS_CMD_INT 0x03
#define GITS_CMD_CLEAR 0x04
#define GITS_CMD_SYNC 0x05
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPC 0x09
#define GITS_CMD_MAPTI 0x0A
#define GITS_CMD_MAPI 0x0B
#define GITS_CMD_INV 0x0C
#define GITS_CMD_INVALL 0x0D
#define GITS_CMD_MOVALL 0x0E
#define GITS_CMD_DISCARD 0x0F
#define GITS_CMD_VMOVI 0x21
#define GITS_CMD_VMOVP 0x22
#define GITS_CMD_VSYNC 0x25
#define GITS_CMD_VMAPP 0x29
#define GITS_CMD_VMAPTI 0x2A
#define GITS_CMD_VMAPI 0x2B
#define GITS_CMD_VINVALL 0x2D

/* MAPC command fields */
#define ICID_LENGTH 16
#define ICID_MASK ((1U << ICID_LENGTH) - 1)
FIELD(MAPC, RDBASE, 16, 32)

#define RDBASE_PROCNUM_LENGTH 16
#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)

/* MAPD command fields */
#define ITTADDR_LENGTH 44
#define ITTADDR_SHIFT 8
#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
#define SIZE_MASK 0x1f

/* MAPI command fields */
#define EVENTID_MASK ((1ULL << 32) - 1)

/* MAPTI command fields */
#define pINTID_SHIFT 32
#define pINTID_MASK MAKE_64BIT_MASK(32, 32)

#define DEVID_SHIFT 32
#define DEVID_MASK MAKE_64BIT_MASK(32, 32)

#define VALID_SHIFT 63
#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT)
#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK (1ULL << 0)

/* MOVALL command fields */
FIELD(MOVALL_2, RDBASE1, 16, 36)
FIELD(MOVALL_3, RDBASE2, 16, 36)

/* MOVI command fields */
FIELD(MOVI_0, DEVICEID, 32, 32)
FIELD(MOVI_1, EVENTID, 0, 32)
FIELD(MOVI_2, ICID, 0, 16)

/* INV command fields */
FIELD(INV_0, DEVICEID, 32, 32)
FIELD(INV_1, EVENTID, 0, 32)

/* VMAPI, VMAPTI command fields */
FIELD(VMAPTI_0, DEVICEID, 32, 32)
FIELD(VMAPTI_1, EVENTID, 0, 32)
FIELD(VMAPTI_1, VPEID, 32, 16)
FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */
FIELD(VMAPTI_2, DOORBELL, 32, 32)

/* VMAPP command fields */
FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */
FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */
FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */
FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
FIELD(VMAPP_1, VPEID, 32, 16)
FIELD(VMAPP_2, RDBASE, 16, 36)
FIELD(VMAPP_2, V, 63, 1)
FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */
FIELD(VMAPP_3, VPTADDR, 16, 36)

/* VMOVP command fields */
FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, VPEID, 32, 16)
FIELD(VMOVP_2, RDBASE, 16, 36)
FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */
FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */

/* VMOVI command fields */
FIELD(VMOVI_0, DEVICEID, 32, 32)
FIELD(VMOVI_1, EVENTID, 0, 32)
FIELD(VMOVI_1, VPEID, 32, 16)
FIELD(VMOVI_2, D, 0, 1)
FIELD(VMOVI_2, DOORBELL, 32, 32)

/* VINVALL command fields */
FIELD(VINVALL_1, VPEID, 32, 16)

/*
 * 12 bytes Interrupt Translation Table Entry size,
 * as per Table 5.3 in the GICv3 spec
 * ITE Lower 8 Bytes
 * Bits:    | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 |    1    |   0   |
 * Values:  |   vPEID   |   ICID    |  unused   |  IntNum  | IntType | Valid |
 * ITE Higher 4 Bytes
 * Bits:    | 31 ... 25 | 24 ... 0 |
 * Values:  |  unused   | Doorbell |
 * (When Doorbell is unused, as it always is for ITE_INTTYPE_PHYSICAL,
 * the value of that field in memory cannot be relied upon -- older
 * versions of QEMU did not correctly write to that memory.)
 */
#define ITS_ITT_ENTRY_SIZE 0xC

FIELD(ITE_L, VALID, 0, 1)
FIELD(ITE_L, INTTYPE, 1, 1)
FIELD(ITE_L, INTID, 2, 24)
FIELD(ITE_L, ICID, 32, 16)
FIELD(ITE_L, VPEID, 48, 16)
FIELD(ITE_H, DOORBELL, 0, 24)

/* Possible values for ITE_L INTTYPE */
#define ITE_INTTYPE_VIRTUAL 0
#define ITE_INTTYPE_PHYSICAL 1
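/*
 * Illustrative sketch only (not code this header provides): the low word
 * of an ITE for a physical LPI could be assembled from the fields above
 * roughly as
 *   itel = FIELD_DP64(0, ITE_L, VALID, 1);
 *   itel = FIELD_DP64(itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
 *   itel = FIELD_DP64(itel, ITE_L, INTID, intid);
 *   itel = FIELD_DP64(itel, ITE_L, ICID, icid);
 * with the ITE_H doorbell word unused for the physical case (see the
 * note above).
 */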

/* 16 bits EventId */
#define ITS_IDBITS GICD_TYPER_IDBITS

/* 16 bits DeviceId */
#define ITS_DEVBITS 0xF

/* 16 bits CollectionId */
#define ITS_CIDBITS 0xF
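/*
 * (The three values above are encoded as "number of bits minus one",
 * matching how the IDbits, Devbits and CIDbits fields are reported in
 * GITS_TYPER, so 0xF advertises 16-bit IDs.)
 */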

/*
 * 8 bytes Device Table Entry size
 * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
 */
#define GITS_DTE_SIZE (0x8ULL)

FIELD(DTE, VALID, 0, 1)
FIELD(DTE, SIZE, 1, 5)
FIELD(DTE, ITTADDR, 6, 44)

/*
 * 8 bytes Collection Table Entry size
 * Valid = 1 bit, RDBase = 16 bits
 */
#define GITS_CTE_SIZE (0x8ULL)
FIELD(CTE, VALID, 0, 1)
FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)

/*
 * 8 bytes VPE table entry size:
 * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits
 *
 * Field sizes for Valid and size are mandated; field sizes for RDbase
 * and VPT_addr are IMPDEF.
 */
#define GITS_VPE_SIZE 0x8ULL

FIELD(VTE, VALID, 0, 1)
FIELD(VTE, VPTSIZE, 1, 5)
FIELD(VTE, VPTADDR, 6, 36)
FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)

/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
#define INTID_SPURIOUS 1023

/* Functions internal to the emulated GICv3 */

/**
 * gicv3_redist_size:
 * @s: GICv3State
 *
 * Return the size of the redistributor register frame in bytes
 * (which depends on what GIC version this is)
 */
static inline int gicv3_redist_size(GICv3State *s)
{
    /*
     * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS.
     * It's the same for every redistributor in the GIC, so arbitrarily
     * use the register field in the first one.
     */
    if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) {
        return GICV4_REDIST_SIZE;
    } else {
        return GICV3_REDIST_SIZE;
    }
}
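/*
 * (GICV3_REDIST_SIZE covers the RD_base and SGI_base 64KB frames;
 * GICV4_REDIST_SIZE additionally covers the further frames, including the
 * VLPI frame, that a GICv4 redistributor requires. Board code typically
 * multiplies this per-CPU size by the number of CPUs when sizing its
 * redistributor regions.)
 */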

/**
 * gicv3_intid_is_special:
 * @intid: interrupt ID
 *
 * Return true if @intid is a special interrupt ID (1020 to
 * 1023 inclusive). This corresponds to the GIC spec pseudocode
 * IsSpecial() function.
 */
static inline bool gicv3_intid_is_special(int intid)
{
    return intid >= INTID_SECURE && intid <= INTID_SPURIOUS;
}

/**
 * gicv3_redist_update:
 * @cs: GICv3CPUState for this redistributor
 *
 * Recalculate the highest priority pending interrupt after a
 * change to redistributor state, and inform the CPU accordingly.
 */
void gicv3_redist_update(GICv3CPUState *cs);

/**
 * gicv3_update:
 * @s: GICv3State
 * @start: first interrupt whose state changed
 * @len: length of the range of interrupts whose state changed
 *
 * Recalculate the highest priority pending interrupts after a
 * change to the distributor state affecting @len interrupts
 * starting at @start, and inform the CPUs accordingly.
 */
void gicv3_update(GICv3State *s, int start, int len);

/**
 * gicv3_full_update_noirqset:
 * @s: GICv3State
 *
 * Recalculate the cached information about highest priority
 * pending interrupts, but don't inform the CPUs. This should be
 * called after an incoming migration has loaded new state.
 */
void gicv3_full_update_noirqset(GICv3State *s);

/**
 * gicv3_full_update:
 * @s: GICv3State
 *
 * Recalculate the highest priority pending interrupts after
 * a change that could affect the status of all interrupts,
 * and inform the CPUs accordingly.
 */
void gicv3_full_update(GICv3State *s);
MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                            unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_process_vlpi:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @vptaddr: (guest) address of VLPI table
 * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell")
 * @level: level to set @irq to
 *
 * Process a virtual LPI being directly injected by the ITS. This function
 * will update the VLPI table specified by @vptaddr. If the
 * vCPU corresponding to that VLPI table is currently running on
 * the CPU associated with this redistributor, directly inject the VLPI
 * @irq. If the vCPU is not running on this CPU, raise the doorbell
 * interrupt instead.
 */
void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level);
/**
 * gicv3_redist_vlpi_pending:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @level: level to set @irq to
 *
 * Set/clear the pending status of a virtual LPI in the vLPI table
 * that this redistributor is currently using. (The difference between
 * this and gicv3_redist_process_vlpi() is that this is called from
 * the cpuif and does not need to do the not-running-on-this-vcpu checks.)
 */
void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level);

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_update_lpi:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate the highest priority
 * pending LPI and also the overall highest priority pending interrupt.
 */
void gicv3_redist_update_lpi(GICv3CPUState *cs);
/**
 * gicv3_redist_update_lpi_only:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate cs->hpplpi only,
 * without calling gicv3_redist_update() to recalculate the overall
 * highest priority pending interrupt. This should be called after
 * an incoming migration has loaded new state.
 */
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
/**
 * gicv3_redist_inv_lpi:
 * @cs: GICv3CPUState
 * @irq: LPI to invalidate cached information for
 *
 * Forget or update any cached information associated with this LPI.
 */
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq);
/**
 * gicv3_redist_inv_vlpi:
 * @cs: GICv3CPUState
 * @irq: vLPI to invalidate cached information for
 * @vptaddr: (guest) address of vLPI table
 *
 * Forget or update any cached information associated with this vLPI.
 */
void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr);
/**
 * gicv3_redist_mov_lpi:
 * @src: source redistributor
 * @dest: destination redistributor
 * @irq: LPI to update
 *
 * Move the pending state of the specified LPI from @src to @dest,
 * as required by the ITS MOVI command.
 */
void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);
/**
 * gicv3_redist_movall_lpis:
 * @src: source redistributor
 * @dest: destination redistributor
 *
 * Scan the LPI pending table for @src, and for each pending LPI there
 * mark it as not-pending for @src and pending for @dest, as required
 * by the ITS MOVALL command.
 */
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);
/**
 * gicv3_redist_mov_vlpi:
 * @src: source redistributor
 * @src_vptaddr: (guest) address of source VLPI table
 * @dest: destination redistributor
 * @dest_vptaddr: (guest) address of destination VLPI table
 * @irq: VLPI to update
 * @doorbell: doorbell for destination (1023 for "no doorbell")
 *
 * Move the pending state of the specified VLPI from @src to @dest,
 * as required by the ITS VMOVI command.
 */
void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell);
/**
 * gicv3_redist_vinvall:
 * @cs: GICv3CPUState
 * @vptaddr: address of VLPI pending table
 *
 * On redistributor @cs, invalidate all cached information associated
 * with the vCPU defined by @vptaddr.
 */
void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

/**
 * gicv3_cpuif_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the IRQ or FIQ lines after a change
 * to the current highest priority pending interrupt, the CPU's
 * current running priority or the CPU's current exception level or
 * security state.
 */
void gicv3_cpuif_update(GICv3CPUState *cs);

/*
 * gicv3_cpuif_virt_irq_fiq_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the virtual IRQ or FIQ lines after
 * a change to the current highest priority pending virtual interrupt.
 * Note that this does not recalculate and change the maintenance
 * interrupt status (for that, see gicv3_cpuif_virt_update()).
 */
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs);

static inline uint32_t gicv3_iidr(void)
{
    /* Return the Implementer Identification Register value
     * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR.
     *
     * We claim to be an ARM r0p0 with a zero ProductID.
     * This is the same as an r0p0 GIC-500.
     */
    return 0x43b;
}
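/*
 * (GICD_IIDR/GICR_IIDR field layout, for reference: ProductID is bits
 * [31:24], Variant bits [19:16], Revision bits [15:12] and Implementer
 * bits [11:0]; 0x43B is the Arm Ltd implementer code, so 0x43b reads as
 * "Arm, variant 0, revision 0, ProductID 0".)
 */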

/* CoreSight PIDR0 values for ARM GICv3 implementations */
#define GICV3_PIDR0_DIST 0x92
#define GICV3_PIDR0_REDIST 0x93
#define GICV3_PIDR0_ITS 0x94

static inline uint32_t gicv3_idreg(GICv3State *s, int regoffset, uint8_t pidr0)
{
    /* Return the value of the CoreSight ID register at the specified
     * offset from the first ID register (as found in the distributor
     * and redistributor register banks).
     * These values indicate an ARM implementation of a GICv3 or v4.
     */
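    /*
     * The twelve entries below correspond to PIDR4..PIDR7, PIDR0..PIDR3
     * and CIDR0..CIDR3 at offsets 0xFFD0..0xFFFC; the PIDR0 slot (index 4)
     * is overridden by the @pidr0 argument.
     */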
    static const uint8_t gicd_ids[] = {
        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x0B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
    };
    uint32_t id;

    regoffset /= 4;

    if (regoffset == 4) {
        return pidr0;
    }
    id = gicd_ids[regoffset];
    if (regoffset == 6) {
        /* PIDR2 bits [7:4] are the GIC architecture revision */
        id |= s->revision << 4;
    }
    return id;
}

/**
 * gicv3_irq_group:
 *
 * Return the group which this interrupt is configured as (GICV3_G0,
 * GICV3_G1 or GICV3_G1NS).
 */
static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq)
{
    bool grpbit, grpmodbit;

    if (irq < GIC_INTERNAL) {
        grpbit = extract32(cs->gicr_igroupr0, irq, 1);
        grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1);
    } else {
        grpbit = gicv3_gicd_group_test(s, irq);
        grpmodbit = gicv3_gicd_grpmod_test(s, irq);
    }
    if (grpbit) {
        return GICV3_G1NS;
    }
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        return GICV3_G0;
    }
    return grpmodbit ? GICV3_G1 : GICV3_G0;
}

/**
 * gicv3_redist_affid:
 *
 * Return the 32-bit affinity ID of the CPU connected to this redistributor
 */
static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs)
{
    return cs->gicr_typer >> 32;
}
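/*
 * (GICR_TYPER bits [63:32] are the Affinity Value, i.e. the packed
 * Aff3.Aff2.Aff1.Aff0 identifier of the connected PE, which is why a
 * plain shift is enough here.)
 */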

/**
 * gicv3_cache_target_cpustate:
 *
 * Update the cached CPU state corresponding to the target for this interrupt
 * (which is kept in s->gicd_irouter_target[]).
 */
static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq)
{
    GICv3CPUState *cs = NULL;
    int i;
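    /*
     * GICD_IROUTER packs Aff0..Aff2 into bits [23:0] and Aff3 into bits
     * [39:32]; repack into the Aff3.Aff2.Aff1.Aff0 format used by
     * GICR_TYPER[63:32] so it can be compared against gicr_typer below.
     */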
    uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) |
                      extract64(s->gicd_irouter[irq], 32, 8) << 24;

    for (i = 0; i < s->num_cpu; i++) {
        if (s->cpu[i].gicr_typer >> 32 == tgtaff) {
            cs = &s->cpu[i];
            break;
        }
    }

    s->gicd_irouter_target[irq] = cs;
}

/**
 * gicv3_cache_all_target_cpustates:
 *
 * Populate the entire cache of CPU state pointers for interrupt targets
 * (eg after inbound migration or CPU reset)
 */
static inline void gicv3_cache_all_target_cpustates(GICv3State *s)
{
    int irq;

    for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) {
        gicv3_cache_target_cpustate(s, irq);
    }
}

void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s);

#endif /* QEMU_ARM_GICV3_INTERNAL_H */