blob: cb55c726810c417afab92b2b098164e9b8500c36 [file] [log] [blame]
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +01001/*
2 * ARM GICv3 support - common bits of emulated and KVM kernel model
3 *
4 * Copyright (c) 2012 Linaro Limited
5 * Copyright (c) 2015 Huawei.
Pavel Fedin07e20342016-06-17 15:23:46 +01006 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +01007 * Written by Peter Maydell
Pavel Fedin07e20342016-06-17 15:23:46 +01008 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +01009 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation, either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 */
23
Peter Maydell8ef94f02016-01-26 18:17:05 +000024#include "qemu/osdep.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010025#include "qapi/error.h"
Markus Armbruster0b8fa322019-05-23 16:35:07 +020026#include "qemu/module.h"
Philippe Mathieu-Daudé0c40daf2023-04-05 13:48:26 +020027#include "qemu/error-report.h"
Markus Armbruster2e5b09f2019-07-09 17:20:52 +020028#include "hw/core/cpu.h"
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +010029#include "hw/intc/arm_gicv3_common.h"
Markus Armbrustera27bd6c2019-08-12 07:23:51 +020030#include "hw/qdev-properties.h"
Markus Armbrusterd6454272019-08-12 07:23:45 +020031#include "migration/vmstate.h"
Pavel Fedin07e20342016-06-17 15:23:46 +010032#include "gicv3_internal.h"
33#include "hw/arm/linux-boot-if.h"
Shannon Zhao910e2042018-06-08 13:15:32 +010034#include "sysemu/kvm.h"
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +010035
Peter Maydell341823c2018-08-06 13:34:45 +010036
/*
 * Fix up incoming GICD distributor bitmap state sent by a QEMU version
 * with the "shift bug" (see comment below). No-op if the source already
 * sent correctly-placed data (indicated by gicd_no_migration_shift_bug).
 */
static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * The state is now in the correct layout, so record that fact: if
     * this VM is migrated onwards, the next destination must not apply
     * the shift correction again.
     */
    cs->gicd_no_migration_shift_bug = true;
}
70
Dr. David Alan Gilbert44b1ff32017-09-25 12:29:12 +010071static int gicv3_pre_save(void *opaque)
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +010072{
73 GICv3State *s = (GICv3State *)opaque;
74 ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
75
76 if (c->pre_save) {
77 c->pre_save(s);
78 }
Dr. David Alan Gilbert44b1ff32017-09-25 12:29:12 +010079
80 return 0;
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +010081}
82
83static int gicv3_post_load(void *opaque, int version_id)
84{
85 GICv3State *s = (GICv3State *)opaque;
86 ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
87
Peter Maydell341823c2018-08-06 13:34:45 +010088 gicv3_gicd_no_migration_shift_bug_post_load(s);
89
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +010090 if (c->post_load) {
91 c->post_load(s);
92 }
93 return 0;
94}
95
Peter Maydell4eb833b2017-01-20 11:15:09 +000096static bool virt_state_needed(void *opaque)
97{
98 GICv3CPUState *cs = opaque;
99
100 return cs->num_list_regs != 0;
101}
102
/*
 * Subsection carrying the EL2 virtualization interface registers
 * (ICH_*); sent only when the CPU has list registers (see
 * virt_state_needed).
 */
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
116
Peter Maydell326049c2018-08-06 13:34:44 +0100117static int vmstate_gicv3_cpu_pre_load(void *opaque)
Vijaya Kumar K6692aac2017-02-23 17:21:10 +0530118{
119 GICv3CPUState *cs = opaque;
120
121 /*
122 * If the sre_el1 subsection is not transferred this
123 * means SRE_EL1 is 0x7 (which might not be the same as
124 * our reset value).
125 */
126 cs->icc_sre_el1 = 0x7;
127 return 0;
128}
129
130static bool icc_sre_el1_reg_needed(void *opaque)
131{
132 GICv3CPUState *cs = opaque;
133
134 return cs->icc_sre_el1 != 7;
135}
136
/*
 * Subsection for ICC_SRE_EL1; omitted when the register holds 0x7,
 * which the destination's pre_load hook assumes by default.
 */
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
147
Peter Maydell641be692022-04-08 15:15:32 +0100148static bool gicv4_needed(void *opaque)
149{
150 GICv3CPUState *cs = opaque;
151
152 return cs->gic->revision > 3;
153}
154
/*
 * Subsection for the GICv4 virtual-LPI base registers; only sent
 * when the device revision is greater than 3.
 */
const VMStateDescription vmstate_gicv3_gicv4 = {
    .name = "arm_gicv3_cpu/gicv4",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = gicv4_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
166
/*
 * Migration state for one GICv3CPUState: the redistributor registers
 * plus the ICC_* CPU interface state. Field order is the wire format;
 * do not reorder without bumping version_id.
 */
static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    /* Optional subsections, each guarded by its own .needed predicate */
    .subsections = (const VMStateDescription * const []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        &vmstate_gicv3_gicv4,
        NULL
    }
};
202
/*
 * Top-level pre_load hook: choose the default for the
 * gicd_no_migration_shift_bug flag before incoming state arrives.
 */
static int gicv3_pre_load(void *opaque)
{
    GICv3State *cs = opaque;

    /*
     * The gicd_no_migration_shift_bug flag is used for migration compatibility
     * for old version QEMU which may have the GICD bmp shift bug under KVM mode.
     * Strictly, what we want to know is whether the migration source is using
     * KVM. Since we don't have any way to determine that, we look at whether the
     * destination is using KVM; this is close enough because for the older QEMU
     * versions with this bug KVM -> TCG migration didn't work anyway. If the
     * source is a newer QEMU without this bug it will transmit the migration
     * subsection which sets the flag to true; otherwise it will remain set to
     * the value we select here.
     */
    if (kvm_enabled()) {
        cs->gicd_no_migration_shift_bug = false;
    }

    return 0;
}
224
/* Subsection predicate for state that must always be transmitted. */
static bool needed_always(void *opaque)
{
    return true;
}
229
/*
 * Always-sent subsection whose presence tells the destination that the
 * source does not have the old GICD bitmap shift bug; its absence means
 * the destination's pre_load default (see gicv3_pre_load) applies.
 */
const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};
240
/*
 * Top-level migration state for the whole GICv3 device: distributor
 * registers plus one vmstate_gicv3_cpu element per CPU. Field order is
 * the wire format; do not reorder without bumping version_id.
 */
static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    /* GIC state must be restored before devices that raise interrupts */
    .priority = MIG_PRI_GICV3,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};
272
/*
 * Wire up the device's external interfaces: incoming GPIO lines for
 * SPIs and per-CPU PPIs, outgoing IRQ/FIQ/vIRQ/vFIQ lines per CPU, and
 * the distributor plus redistributor-region MMIO windows.
 *
 * @s: the GIC device (num_cpu/num_irq/redist regions already configured)
 * @handler: callback invoked when an input GPIO line changes
 * @ops: MemoryRegionOps pair — ops[0] for the distributor, ops[1] for
 *       redistributor regions; may be NULL (regions get no ops)
 *
 * NOTE(review): the sysbus MMIO/IRQ registration order below is part of
 * the contract with board code that maps the regions — do not reorder.
 */
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;
    int cpuidx;

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    /* Output lines: all parent_irq first, then all parent_fiq, etc. */
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

    /*
     * One MMIO region per redistributor region; each region records the
     * index of the first CPU whose redistributor it contains.
     */
    s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions);
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
        GICv3RedistRegion *region = &s->redist_regions[i];

        region->gic = s;
        region->cpuidx = cpuidx;
        cpuidx += s->redist_region_count[i];

        memory_region_init_io(&region->iomem, OBJECT(s),
                              ops ? &ops[1] : NULL, region, name,
                              s->redist_region_count[i] * gicv3_redist_size(s));
        sysbus_init_mmio(sbd, &region->iomem);
        g_free(name);
    }
}
324
/*
 * Realize handler shared by the emulated and KVM GICv3 models:
 * validates the configured properties, allocates the per-CPU state
 * array and pre-computes each CPU's GICR_TYPER value.
 *
 * On invalid configuration sets @errp and returns without allocating.
 */
static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i, rdist_capacity, cpuidx;

    /*
     * This GIC device supports only revisions 3 and 4. The GICv1/v2
     * is a separate device.
     * Note that subclasses of this device may impose further restrictions
     * on the GIC revision: notably, the in-kernel KVM GIC doesn't
     * support GICv4.
     */
    if (s->revision != 3 && s->revision != 4) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }
    if (s->num_cpu == 0) {
        error_setg(errp, "num-cpu must be at least 1");
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1, so this is an
     * implementation imposed restriction, not an architectural one,
     * so we don't have to deal with bitfields where only some of the
     * bits in a 32-bit word should be valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    /* LPIs require the "sysmem" link so the ITS can DMA its tables */
    if (s->lpi_enable && !s->dma) {
        error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
        return;
    }

    /* Every CPU must have exactly one redistributor slot somewhere */
    rdist_capacity = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity != s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "does not match the number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    if (s->lpi_enable) {
        address_space_init(&s->dma_as, s->dma,
                           "gicv3-its-sysmem");
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 1 if vLPIs supported (GICv4 and up)
         *  PLPIS == 1 if LPIs supported
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                     (cpu_affid & 0xFFFFFF);
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
                               (1 << 24) |
                               (i << 8);

        if (s->lpi_enable) {
            s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
            if (s->revision > 3) {
                s->cpu[i].gicr_typer |= GICR_TYPER_VLPIS;
            }
        }
    }

    /*
     * Now go through and set GICR_TYPER.Last for the final
     * redistributor in each region.
     */
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        cpuidx += s->redist_region_count[i];
        s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
    }

    s->itslist = g_ptr_array_new();
}
446
/*
 * Instance finalizer: release the redist-region-count array allocated
 * by the "redist-region-count" array property.
 */
static void arm_gicv3_finalize(Object *obj)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    g_free(s->redist_region_count);
}
453
/*
 * Resettable "hold" phase: reset all distributor and redistributor
 * state to architectural reset values (with the NonSecure-boot tweak
 * for direct kernel boot). CPU interface state is deliberately left
 * alone — it belongs to the CPU's reset domain.
 */
static void arm_gicv3_common_reset_hold(Object *obj)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        if (s->lpi_enable) {
            /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
            cs->gicr_ctlr |= GICR_CTLR_CES;
        }
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        cs->gicr_vpropbaser = 0;
        cs->gicr_vpendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        /* 0xff == "no pending interrupt" for the highest-priority trackers */
        cs->hppi.prio = 0xff;
        cs->hpplpi.prio = 0xff;
        cs->hppvlpi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    /* A freshly-reset instance never has the old migration shift bug */
    s->gicd_no_migration_shift_bug = true;
}
543
544static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
545 bool secure_boot)
546{
547 GICv3State *s = ARM_GICV3_COMMON(obj);
548
549 if (s->security_extn && !secure_boot) {
550 /* We're directly booting a kernel into NonSecure. If this GIC
551 * implements the security extensions then we must configure it
552 * to have all the interrupts be NonSecure (this is a job that
553 * is done by the Secure boot firmware in real hardware, and in
554 * this mode QEMU is acting as a minimalist firmware-and-bootloader
555 * equivalent).
556 */
557 s->irq_reset_nonsecure = true;
558 }
Shlomo Pongratzff8f06e2015-09-24 01:29:36 +0100559}
560
/* User-configurable properties shared by the emulated and KVM models */
static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    /*
     * Compatibility property: force 8 bits of physical priority, even
     * if the CPU being emulated should have fewer.
     */
    DEFINE_PROP_BOOL("force-8-bit-prio", GICv3State, force_8bit_prio, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    /* Memory region the ITS DMAs into; required when has-lpi is set */
    DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
                     MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
578
/* Class init: wire reset, realize, properties, vmstate and boot hook */
static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);

    rc->phases.hold = arm_gicv3_common_reset_hold;
    dc->realize = arm_gicv3_common_realize;
    device_class_set_props(dc, arm_gicv3_common_properties);
    dc->vmsd = &vmstate_gicv3;
    albifc->arm_linux_init = arm_gic_common_linux_init;
}
591
/*
 * Abstract base QOM type; the emulated and KVM GICv3 devices subclass
 * it. Implements the ARM Linux-boot interface for direct kernel boot.
 */
static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};
605
/* Register the abstract base type with QOM at startup */
static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)
Philippe Mathieu-Daudé0c40daf2023-04-05 13:48:26 +0200612
/*
 * Return the QOM class name of the GICv3 model to instantiate for the
 * current accelerator: the in-kernel model when the irqchip lives in
 * KVM, the emulated model otherwise. A userspace GICv3 combined with
 * KVM is unsupported and aborts QEMU.
 */
const char *gicv3_class_name(void)
{
    if (kvm_irqchip_in_kernel()) {
        return "kvm-arm-gicv3";
    }

    if (kvm_enabled()) {
        error_report("Userspace GICv3 is not supported with KVM");
        exit(1);
    }

    return "arm-gicv3";
}