blob: e7f1c1f2190f046c5b632a39c8522b2aeb086b9a [file] [log] [blame]
Eric Auger527773e2018-05-04 18:05:51 +01001/*
2 * Copyright (C) 2014-2016 Broadcom Corporation
3 * Copyright (c) 2017 Red Hat, Inc.
4 * Written by Prem Mallappa, Eric Auger
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Author: Prem Mallappa <pmallapp@broadcom.com>
16 *
17 */
18
19#include "qemu/osdep.h"
Eric Auger527773e2018-05-04 18:05:51 +010020#include "trace.h"
21#include "exec/target_page.h"
Markus Armbruster2e5b09f2019-07-09 17:20:52 +020022#include "hw/core/cpu.h"
Eric Auger527773e2018-05-04 18:05:51 +010023#include "hw/qdev-properties.h"
24#include "qapi/error.h"
Eric Augercc27ed82018-06-26 17:50:42 +010025#include "qemu/jhash.h"
Markus Armbruster0b8fa322019-05-23 16:35:07 +020026#include "qemu/module.h"
Eric Auger527773e2018-05-04 18:05:51 +010027
28#include "qemu/error-report.h"
29#include "hw/arm/smmu-common.h"
Eric Auger93641942018-05-04 18:05:51 +010030#include "smmu-internal.h"
31
Eric Augercc27ed82018-06-26 17:50:42 +010032/* IOTLB Management */
33
Eric Auger60a61f12020-07-28 17:08:07 +020034static guint smmu_iotlb_key_hash(gconstpointer v)
35{
36 SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
37 uint32_t a, b, c;
38
39 /* Jenkins hash */
40 a = b = c = JHASH_INITVAL + sizeof(*key);
Eric Auger9e54dee2020-07-28 17:08:09 +020041 a += key->asid + key->level + key->tg;
Eric Auger60a61f12020-07-28 17:08:07 +020042 b += extract64(key->iova, 0, 32);
43 c += extract64(key->iova, 32, 32);
44
45 __jhash_mix(a, b, c);
46 __jhash_final(a, b, c);
47
48 return c;
49}
50
51static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
52{
Eric Auger9e54dee2020-07-28 17:08:09 +020053 SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2;
Eric Auger60a61f12020-07-28 17:08:07 +020054
Eric Auger9e54dee2020-07-28 17:08:09 +020055 return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
56 (k1->level == k2->level) && (k1->tg == k2->tg);
Eric Auger60a61f12020-07-28 17:08:07 +020057}
58
Eric Auger9e54dee2020-07-28 17:08:09 +020059SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova,
60 uint8_t tg, uint8_t level)
Eric Auger60a61f12020-07-28 17:08:07 +020061{
Eric Auger9e54dee2020-07-28 17:08:09 +020062 SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level};
Eric Auger60a61f12020-07-28 17:08:07 +020063
64 return key;
65}
66
Eric Augera7550152020-07-28 17:08:08 +020067SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
Eric Auger9e54dee2020-07-28 17:08:09 +020068 SMMUTransTableInfo *tt, hwaddr iova)
Eric Auger6808bca2020-07-28 17:08:06 +020069{
Eric Auger9e54dee2020-07-28 17:08:09 +020070 uint8_t tg = (tt->granule_sz - 10) / 2;
71 uint8_t inputsize = 64 - tt->tsz;
72 uint8_t stride = tt->granule_sz - 3;
73 uint8_t level = 4 - (inputsize - 4) / stride;
74 SMMUTLBEntry *entry = NULL;
75
76 while (level <= 3) {
77 uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
78 uint64_t mask = subpage_size - 1;
79 SMMUIOTLBKey key;
80
81 key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level);
82 entry = g_hash_table_lookup(bs->iotlb, &key);
83 if (entry) {
84 break;
85 }
86 level++;
87 }
Eric Auger6808bca2020-07-28 17:08:06 +020088
89 if (entry) {
90 cfg->iotlb_hits++;
91 trace_smmu_iotlb_lookup_hit(cfg->asid, iova,
92 cfg->iotlb_hits, cfg->iotlb_misses,
93 100 * cfg->iotlb_hits /
94 (cfg->iotlb_hits + cfg->iotlb_misses));
95 } else {
96 cfg->iotlb_misses++;
97 trace_smmu_iotlb_lookup_miss(cfg->asid, iova,
98 cfg->iotlb_hits, cfg->iotlb_misses,
99 100 * cfg->iotlb_hits /
100 (cfg->iotlb_hits + cfg->iotlb_misses));
101 }
102 return entry;
103}
104
Eric Augera7550152020-07-28 17:08:08 +0200105void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
Eric Auger6808bca2020-07-28 17:08:06 +0200106{
107 SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
Eric Auger9e54dee2020-07-28 17:08:09 +0200108 uint8_t tg = (new->granule - 10) / 2;
Eric Auger6808bca2020-07-28 17:08:06 +0200109
110 if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
111 smmu_iotlb_inv_all(bs);
112 }
113
Eric Auger9e54dee2020-07-28 17:08:09 +0200114 *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level);
115 trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level);
Eric Augera7550152020-07-28 17:08:08 +0200116 g_hash_table_insert(bs->iotlb, key, new);
Eric Auger6808bca2020-07-28 17:08:06 +0200117}
118
/* Invalidate (drop) every cached entry in the IOTLB. */
void smmu_iotlb_inv_all(SMMUState *s)
{
    trace_smmu_iotlb_inv_all();
    g_hash_table_remove_all(s->iotlb);
}
124
125static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
126 gpointer user_data)
127{
128 uint16_t asid = *(uint16_t *)user_data;
129 SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
130
Eric Auger60a61f12020-07-28 17:08:07 +0200131 return SMMU_IOTLB_ASID(*iotlb_key) == asid;
Eric Augercc27ed82018-06-26 17:50:42 +0100132}
133
Eric Auger9e54dee2020-07-28 17:08:09 +0200134static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
135 gpointer user_data)
Eric Augercc27ed82018-06-26 17:50:42 +0100136{
Eric Auger9e54dee2020-07-28 17:08:09 +0200137 SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
138 IOMMUTLBEntry *entry = &iter->entry;
139 SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
140 SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;
141
142 if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
143 return false;
144 }
Eric Augerd5291562020-07-28 17:08:11 +0200145 return ((info->iova & ~entry->addr_mask) == entry->iova) ||
146 ((entry->iova & ~info->mask) == info->iova);
Eric Auger9e54dee2020-07-28 17:08:09 +0200147}
148
Philippe Mathieu-Daudé9de9fa52022-12-16 22:49:24 +0100149void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
150 uint8_t tg, uint64_t num_pages, uint8_t ttl)
Eric Auger9e54dee2020-07-28 17:08:09 +0200151{
Eric Auger6d9cd112021-03-09 11:27:40 +0100152 /* if tg is not set we use 4KB range invalidation */
153 uint8_t granule = tg ? tg * 2 + 10 : 12;
154
Eric Augera4b6e1b2021-03-09 11:27:39 +0100155 if (ttl && (num_pages == 1) && (asid >= 0)) {
Eric Augerd5291562020-07-28 17:08:11 +0200156 SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl);
Eric Augercc27ed82018-06-26 17:50:42 +0100157
Eric Auger6d9cd112021-03-09 11:27:40 +0100158 if (g_hash_table_remove(s->iotlb, &key)) {
159 return;
160 }
161 /*
162 * if the entry is not found, let's see if it does not
163 * belong to a larger IOTLB entry
164 */
Eric Augerd5291562020-07-28 17:08:11 +0200165 }
Eric Auger6d9cd112021-03-09 11:27:40 +0100166
167 SMMUIOTLBPageInvInfo info = {
168 .asid = asid, .iova = iova,
169 .mask = (num_pages * 1 << granule) - 1};
170
171 g_hash_table_foreach_remove(s->iotlb,
172 smmu_hash_remove_by_asid_iova,
173 &info);
Eric Augercc27ed82018-06-26 17:50:42 +0100174}
175
/* Invalidate every IOTLB entry tagged with @asid. */
void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
{
    trace_smmu_iotlb_inv_asid(asid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
}
181
Eric Auger93641942018-05-04 18:05:51 +0100182/* VMSAv8-64 Translation */
183
184/**
185 * get_pte - Get the content of a page table entry located at
186 * @base_addr[@index]
187 */
188static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
189 SMMUPTWEventInfo *info)
190{
191 int ret;
192 dma_addr_t addr = baseaddr + index * sizeof(*pte);
193
194 /* TODO: guarantee 64-bit single-copy atomicity */
Philippe Mathieu-Daudéba06fe82020-09-03 10:08:29 +0200195 ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte),
196 MEMTXATTRS_UNSPECIFIED);
Eric Auger93641942018-05-04 18:05:51 +0100197
198 if (ret != MEMTX_OK) {
199 info->type = SMMU_PTW_ERR_WALK_EABT;
200 info->addr = addr;
201 return -EINVAL;
202 }
203 trace_smmu_get_pte(baseaddr, index, addr, *pte);
204 return 0;
205}
206
207/* VMSAv8-64 Translation Table Format Descriptor Decoding */
208
/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * ie. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 *
 * PTE_ADDRESS() masks off the attribute bits below @granule_sz and the
 * upper attribute field, leaving only the output address bits.
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}
218
/**
 * get_table_pte_address - return table descriptor output address,
 * ie. address of next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
 *
 * Same bit extraction as for a page descriptor; kept as a separate helper
 * so call sites document which descriptor kind they are decoding.
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}
228
/**
 * get_block_pte_address - return block descriptor output address and block size
 * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
 *
 * @bsz: out parameter, set to the size in bytes of the block mapped by this
 *       descriptor (depends on the level and the translation granule).
 */
static inline hwaddr get_block_pte_address(uint64_t pte, int level,
                                           int granule_sz, uint64_t *bsz)
{
    /* a level-N block maps 2^level_shift(N) bytes */
    int n = level_shift(level, granule_sz);

    *bsz = 1ULL << n;
    return PTE_ADDRESS(pte, n);
}
241
242SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
243{
244 bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
245 uint8_t tbi_byte = tbi * 8;
246
247 if (cfg->tt[0].tsz &&
248 !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
249 /* there is a ttbr0 region and we are in it (high bits all zero) */
250 return &cfg->tt[0];
251 } else if (cfg->tt[1].tsz &&
Jean-Philippe Bruckere431b8f2023-02-14 17:19:22 +0000252 sextract64(iova, 64 - cfg->tt[1].tsz, cfg->tt[1].tsz - tbi_byte) == -1) {
Eric Auger93641942018-05-04 18:05:51 +0100253 /* there is a ttbr1 region and we are in it (high bits all one) */
254 return &cfg->tt[1];
255 } else if (!cfg->tt[0].tsz) {
256 /* ttbr0 region is "everything not in the ttbr1 region" */
257 return &cfg->tt[0];
258 } else if (!cfg->tt[1].tsz) {
259 /* ttbr1 region is "everything not in the ttbr0 region" */
260 return &cfg->tt[1];
261 }
262 /* in the gap between the two regions, this is a Translation fault */
263 return NULL;
264}
265
/**
 * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA
 * @cfg: translation config
 * @iova: iova to translate
 * @perm: access type
 * @tlbe: SMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64(SMMUTransCfg *cfg,
                       dma_addr_t iova, IOMMUAccessFlags perm,
                       SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    int stage = cfg->stage;
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    /* no region covers this IOVA, or the covering table is disabled */
    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = granule_sz - 3;                /* VA bits resolved per level */
    inputsize = 64 - tt->tsz;               /* total VA bits to translate */
    level = 4 - (inputsize - 4) / stride;   /* initial lookup level */
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
    baseaddr = extract64(tt->ttb, 0, 48);
    baseaddr &= ~indexmask;

    while (level <= 3) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            /* bus error on the walk: info already filled by get_pte() */
            goto error;
        }
        trace_smmu_ptw_level(level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            /* fall out of the loop to report a translation fault */
            break;
        }

        if (is_table_pte(pte, level)) {
            ap = PTE_APTABLE(pte);

            /* hierarchical APTable checks, unless disabled by HAD */
            if (is_permission_fault(ap, perm) && !tt->had) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }
            /* descend into the next-level table */
            baseaddr = get_table_pte_address(pte, granule_sz);
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);
        }
        /* leaf descriptor reached: final access-permission check */
        ap = PTE_AP(pte);
        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        /* fill the TLB entry, aligned to this level's block size */
        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = iova & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->entry.perm = PTE_AP_TO_PERM(ap);
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    /* invalid/reserved PTE ended the walk */
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}
364
/**
 * smmu_ptw - Walk the page tables for an IOVA, according to @cfg
 *
 * @cfg: translation configuration
 * @iova: iova to translate
 * @perm: tentative access type
 * @tlbe: returned entry
 * @info: ptw event handle
 *
 * return 0 on success
 */
int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
             SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    /* only AArch64 translation tables are supported */
    if (!cfg->aa64) {
        /*
         * This code path is not entered as we check this while decoding
         * the configuration data in the derived SMMU model.
         */
        g_assert_not_reached();
    }

    return smmu_ptw_64(cfg, iova, perm, tlbe, info);
}
Eric Auger527773e2018-05-04 18:05:51 +0100389
Eric Augercac994e2018-05-04 18:05:51 +0100390/**
391 * The bus number is used for lookup when SID based invalidation occurs.
392 * In that case we lazily populate the SMMUPciBus array from the bus hash
393 * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
394 * numbers may not be always initialized yet.
395 */
396SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
397{
398 SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
Philippe Mathieu-Daudé5ca0e6f2020-03-05 16:09:14 +0000399 GHashTableIter iter;
Eric Augercac994e2018-05-04 18:05:51 +0100400
Philippe Mathieu-Daudé5ca0e6f2020-03-05 16:09:14 +0000401 if (smmu_pci_bus) {
402 return smmu_pci_bus;
Eric Augercac994e2018-05-04 18:05:51 +0100403 }
Philippe Mathieu-Daudé5ca0e6f2020-03-05 16:09:14 +0000404
405 g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
406 while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
407 if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
408 s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
409 return smmu_pci_bus;
410 }
411 }
412
413 return NULL;
Eric Augercac994e2018-05-04 18:05:51 +0100414}
415
416static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
417{
418 SMMUState *s = opaque;
419 SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
420 SMMUDevice *sdev;
Eric Auger6ce92972018-09-25 14:02:32 +0100421 static unsigned int index;
Eric Augercac994e2018-05-04 18:05:51 +0100422
423 if (!sbus) {
424 sbus = g_malloc0(sizeof(SMMUPciBus) +
425 sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
426 sbus->bus = bus;
427 g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus);
428 }
429
430 sdev = sbus->pbdev[devfn];
431 if (!sdev) {
Eric Auger6ce92972018-09-25 14:02:32 +0100432 char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++);
433
Eric Augercac994e2018-05-04 18:05:51 +0100434 sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1);
435
436 sdev->smmu = s;
437 sdev->bus = bus;
438 sdev->devfn = devfn;
439
440 memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu),
441 s->mrtypename,
Jean-Philippe Bruckerca3fbed2023-02-14 17:19:21 +0000442 OBJECT(s), name, UINT64_MAX);
Eric Augercac994e2018-05-04 18:05:51 +0100443 address_space_init(&sdev->as,
444 MEMORY_REGION(&sdev->iommu), name);
445 trace_smmu_add_mr(name);
446 g_free(name);
447 }
448
449 return &sdev->as;
450}
451
Eric Auger32cfd7f2018-06-26 17:50:42 +0100452IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid)
453{
454 uint8_t bus_n, devfn;
455 SMMUPciBus *smmu_bus;
456 SMMUDevice *smmu;
457
458 bus_n = PCI_BUS_NUM(sid);
459 smmu_bus = smmu_find_smmu_pcibus(s, bus_n);
460 if (smmu_bus) {
Eric Augerb78aae92018-07-09 14:51:34 +0100461 devfn = SMMU_PCI_DEVFN(sid);
Eric Auger32cfd7f2018-06-26 17:50:42 +0100462 smmu = smmu_bus->pbdev[devfn];
463 if (smmu) {
464 return &smmu->iommu;
465 }
466 }
467 return NULL;
468}
469
/* Unmap all notifiers attached to @mr */
static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
{
    IOMMUNotifier *n;

    trace_smmu_inv_notifiers_mr(mr->parent_obj.name);
    IOMMU_NOTIFIER_FOREACH(n, mr) {
        /* signal an UNMAP event over the notifier's registered range */
        memory_region_unmap_iommu_notifier_range(n);
    }
}
480
/* Unmap all notifiers of all mr's */
void smmu_inv_notifiers_all(SMMUState *s)
{
    SMMUDevice *sdev;

    /* only devices with registered notifiers are on this list */
    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        smmu_inv_notifiers_mr(&sdev->iommu);
    }
}
490
Eric Auger527773e2018-05-04 18:05:51 +0100491static void smmu_base_realize(DeviceState *dev, Error **errp)
492{
Eric Augercac994e2018-05-04 18:05:51 +0100493 SMMUState *s = ARM_SMMU(dev);
Eric Auger527773e2018-05-04 18:05:51 +0100494 SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
495 Error *local_err = NULL;
496
497 sbc->parent_realize(dev, &local_err);
498 if (local_err) {
499 error_propagate(errp, local_err);
500 return;
501 }
Eric Auger32cfd7f2018-06-26 17:50:42 +0100502 s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
Eric Augercc27ed82018-06-26 17:50:42 +0100503 s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal,
504 g_free, g_free);
Eric Augercac994e2018-05-04 18:05:51 +0100505 s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);
506
507 if (s->primary_bus) {
508 pci_setup_iommu(s->primary_bus, smmu_find_add_as, s);
509 } else {
510 error_setg(errp, "SMMU is not attached to any PCI bus!");
511 }
Eric Auger527773e2018-05-04 18:05:51 +0100512}
513
/* Resettable "hold" phase: drop all cached configs and IOTLB entries. */
static void smmu_base_reset_hold(Object *obj)
{
    SMMUState *s = ARM_SMMU(obj);

    g_hash_table_remove_all(s->configs);
    g_hash_table_remove_all(s->iotlb);
}
521
/* qdev properties shared by all SMMU models */
static Property smmu_dev_properties[] = {
    DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
    /* the PCI bus this SMMU translates for; mandatory (checked at realize) */
    DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};

/* Class init: install properties, the reset phase and the chained realize. */
static void smmu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    device_class_set_props(dc, smmu_dev_properties);
    /* save the parent realize so smmu_base_realize() can chain up */
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
    rc->phases.hold = smmu_base_reset_hold;
}

/* Abstract base type; concrete SMMU models (e.g. smmuv3) derive from it. */
static const TypeInfo smmu_base_info = {
    .name = TYPE_ARM_SMMU,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SMMUState),
    .class_data = NULL,
    .class_size = sizeof(SMMUBaseClass),
    .class_init = smmu_base_class_init,
    .abstract = true,
};

/* Register the abstract base type with QOM. */
static void smmu_base_register_types(void)
{
    type_register_static(&smmu_base_info);
}

type_init(smmu_base_register_types)
557