/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "config.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "sysemu/xen-mapcache.h"
#include "trace.h"


//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the MapCache.
 * From empirical tests, I observed that QEMU uses 75MB more than the
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)

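/*
 * A MapCacheEntry describes one mapped bucket of guest memory: "size" bytes
 * (MCACHE_BUCKET_SIZE, or more for locked mappings spanning several buckets)
 * starting at guest frame paddr_index << MCACHE_BUCKET_SHIFT and mapped at
 * vaddr_base.  valid_mapping has one bit per XC_PAGE_SIZE page, clear for
 * pages that failed to map; entries hashing to the same bucket are chained
 * through next, and lock counts the callers currently pinning the mapping.
 */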
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

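/*
 * A MapCacheRev records one locked mapping handed out to a caller, so that
 * xen_ram_addr_from_mapcache() and xen_invalidate_map_cache_entry() can map
 * the returned pointer back to its bucket.
 */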
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

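/*
 * The MapCache itself: a hash table of nr_buckets entry chains, the list of
 * currently locked mappings, and a one-entry cache of the last hit
 * (last_entry).  Accesses are serialised by "lock"; the *_unlocked helpers
 * below assume the caller already holds it.
 */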
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}

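/*
 * Return 1 if all "size" bits starting at bit "nr" are set in the bitmap,
 * 0 otherwise.  Used to check that every page of a requested range was
 * successfully mapped.
 */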
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}

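/*
 * Allocate and initialise the global mapcache.  The cache size is capped at
 * MCACHE_MAX_SIZE; for unprivileged users it is further limited by the
 * RLIMIT_AS address-space limit, minus NON_MCACHE_MEMORY_SIZE reserved for
 * the rest of QEMU.
 */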
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}

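/*
 * (Re)map one cache entry: tear down any previous mapping, map "size" bytes
 * of guest memory starting at bucket "address_index" with
 * xc_map_foreign_bulk(), and rebuild the valid_mapping bitmap from the
 * per-page error array.
 */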
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    if (entry->valid_mapping != NULL) {
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}

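/*
 * Look up (or create) the mapping for guest physical address "phys_addr" and
 * return the corresponding host pointer; the caller must hold the mapcache
 * lock.  When "size" is zero a single page is assumed and, if the request is
 * not locked, it may be served straight from last_entry.  A non-zero "lock"
 * pins the mapping and records it in locked_entries until
 * xen_invalidate_map_cache_entry() releases it.
 *
 * Illustrative use of the locked wrapper below (names are placeholders):
 *
 *     uint8_t *p = xen_map_cache(guest_paddr, len, 1);
 *     if (p) {
 *         ... access len bytes at p ...
 *         xen_invalidate_map_cache_entry(p);
 *     }
 */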
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* cache_size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}

uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock);
    mapcache_unlock();
    return p;
}

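/*
 * Translate a host pointer previously returned by a locked xen_map_cache()
 * call back into its guest RAM address, by searching the locked_entries
 * list.  Aborts if the pointer is not a known locked mapping.
 */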
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}

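/*
 * Drop one reference on the locked mapping that backs "buffer".  When the
 * last reference goes away and the entry is not the head of its bucket
 * chain, the mapping is unmapped and the entry freed.  The caller must hold
 * the mapcache lock.
 */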
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}

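/*
 * Flush the whole mapcache: drain pending block I/O first, then unmap and
 * reset every entry that is not currently locked.
 */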
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}