/*
 * ARM GICv3 support - common bits of emulated and KVM kernel model
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/cpu.h"
#include "hw/intc/arm_gicv3_common.h"
#include "gicv3_internal.h"
#include "hw/arm/linux-boot-if.h"
#include "sysemu/kvm.h"


static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
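    /* GIC_INTERNAL is 32, so GIC_INTERNAL / 8 is the size in bytes of one
     * 32-bit bitmap word: the amount by which the data must shift down.
     */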
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * This version of QEMU does not have the shift bug, so after fixing up
     * any incoming state we set the flag to true. This records that the
     * state is now in the correct layout, which is what makes a subsequent
     * migration from this QEMU work.
     */
    cs->gicd_no_migration_shift_bug = true;
}

static int gicv3_pre_save(void *opaque)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    if (c->pre_save) {
        c->pre_save(s);
    }

    return 0;
}

static int gicv3_post_load(void *opaque, int version_id)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    gicv3_gicd_no_migration_shift_bug_post_load(s);

    if (c->post_load) {
        c->post_load(s);
    }
    return 0;
}

static bool virt_state_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->num_list_regs != 0;
}

static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static int vmstate_gicv3_cpu_pre_load(void *opaque)
{
    GICv3CPUState *cs = opaque;

    /*
     * If the sre_el1 subsection is not transferred this
     * means SRE_EL1 is 0x7 (which might not be the same as
     * our reset value).
     */
    cs->icc_sre_el1 = 0x7;
    return 0;
}

static bool icc_sre_el1_reg_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->icc_sre_el1 != 7;
}

const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        NULL
    }
};

static int gicv3_pre_load(void *opaque)
{
    GICv3State *cs = opaque;

    /*
     * The gicd_no_migration_shift_bug flag is used for migration
     * compatibility with older QEMU versions, which may have the GICD
     * bitmap shift bug when running under KVM. Strictly, what we want to
     * know is whether the migration source is using KVM. Since we don't
     * have any way to determine that, we look at whether the destination
     * is using KVM; this is close enough because for the older QEMU
     * versions with this bug KVM -> TCG migration didn't work anyway. If
     * the source is a newer QEMU without this bug it will transmit the
     * migration subsection which sets the flag to true; otherwise it will
     * remain set to the value we select here.
     */
    if (kvm_enabled()) {
        cs->gicd_no_migration_shift_bug = false;
    }

    return 0;
}

static bool needed_always(void *opaque)
{
    return true;
}

const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};

void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int rdist_capacity = 0;
    int i;

    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity < s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "is less than number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
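    /* Here N is s->num_irq - GIC_INTERNAL, i.e. the number of SPIs, since
     * num_irq also counts the 32 internal interrupt IDs per the GIC
     * numbering convention.
     */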
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

    s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions);
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);

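        /* Each redistributor occupies GICV3_REDIST_SIZE bytes of MMIO space
         * (in this model, two 64KB frames: RD_base and SGI_base), so a
         * region's size is its redistributor count times that.
         */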
        memory_region_init_io(&s->iomem_redist[i], OBJECT(s),
                              ops ? &ops[1] : NULL, s, name,
                              s->redist_region_count[i] * GICV3_REDIST_SIZE);
        sysbus_init_mmio(sbd, &s->iomem_redist[i]);
        g_free(name);
    }
}

static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    /* revision property is actually reserved and currently used only in order
     * to keep the interface compatible with GICv2 code, avoiding extra
     * conditions. However, in future it could be used, for example, if we
     * implement GICv4.
     */
    if (s->revision != 3) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1, so requiring num_irq
     * to be a multiple of 32 is an implementation-imposed restriction,
     * not an architectural one; it means we don't have to deal with
     * bitfields where only some of the bits in a 32-bit word are valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;
        int last;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 0 (virtual LPIs not supported)
         *  PLPIS == 0 (physical LPIs not supported)
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);
        last = (i == s->num_cpu - 1);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
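        /* i.e. MPIDR Aff3 (bits [39:32]) moves down to bits [31:24], while
         * Aff2/Aff1/Aff0 stay in bits [23:0].
         */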
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                     (cpu_affid & 0xFFFFFF);
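        /* GICR_TYPER field positions: Affinity_Value is bits [63:32],
         * CommonLPIAff is bits [25:24], Processor_Number is bits [23:8]
         * and Last is bit 4, matching the shifts used below.
         */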
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
                               (1 << 24) |
                               (i << 8) |
                               (last << 4);
    }
}

static void arm_gicv3_finalize(Object *obj)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    g_free(s->redist_region_count);
}

static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        cs->hppi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
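    /* GICD_CTLR.DS is set when the security extensions are absent (the GIC
     * then has only a single Security state); with them present, ARE is
     * set for both the Secure and Non-secure views.
     */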
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    s->gicd_no_migration_shift_bug = true;
}

static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
                                      bool secure_boot)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    if (s->security_extn && !secure_boot) {
        /* We're directly booting a kernel into NonSecure. If this GIC
         * implements the security extensions then we must configure it
         * to have all the interrupts be NonSecure (this is a job that
         * is done by the Secure boot firmware in real hardware, and in
         * this mode QEMU is acting as a minimalist firmware-and-bootloader
         * equivalent).
         */
        s->irq_reset_nonsecure = true;
    }
}

static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_END_OF_LIST(),
};

static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);

    dc->reset = arm_gicv3_common_reset;
    dc->realize = arm_gicv3_common_realize;
    dc->props = arm_gicv3_common_properties;
    dc->vmsd = &vmstate_gicv3;
    albifc->arm_linux_init = arm_gic_common_linux_init;
}

static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};

static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)