/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#ifndef BLOCK_QED_H
#define BLOCK_QED_H

#include "block/block_int.h"
#include "qemu/cutils.h"

/* The layout of a QED file is as follows:
 *
 * +--------+----------+----------+----------+-----+
 * | header | L1 table | cluster0 | cluster1 | ... |
 * +--------+----------+----------+----------+-----+
 *
 * There is a 2-level pagetable for cluster allocation:
 *
 *                     +----------+
 *                     | L1 table |
 *                     +----------+
 *                ,------'  |  '------.
 *           +----------+   |    +----------+
 *           | L2 table |  ...   | L2 table |
 *           +----------+        +----------+
 *                ,------'  |  '------.
 *           +----------+   |    +----------+
 *           |   Data   |  ...   |   Data   |
 *           +----------+        +----------+
 *
 * The L1 table is fixed size and always present.  L2 tables are allocated on
 * demand.  The L1 table size determines the maximum possible image size; it
 * can be influenced using the cluster_size and table_size values.
 *
 * All fields are little-endian on disk.
 */
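
/* Worked example (illustrative, derived from the defaults below): with
 * cluster_size = 65536 and table_size = 4, each table holds
 * table_size * cluster_size / sizeof(uint64_t) = 32768 offsets.  The L1
 * table and each L2 table share this geometry, so the maximum image size
 * is 32768 * 32768 * 65536 bytes = 2^46 bytes = 64 TB.
 */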
#define QED_DEFAULT_CLUSTER_SIZE 65536
enum {
    QED_MAGIC = 'Q' | 'E' << 8 | 'D' << 16 | '\0' << 24,

    /* The image supports a backing file */
    QED_F_BACKING_FILE = 0x01,

    /* The image needs a consistency check before use */
    QED_F_NEED_CHECK = 0x02,

    /* The backing file format must not be probed, treat as raw image */
    QED_F_BACKING_FORMAT_NO_PROBE = 0x04,

    /* Feature bits must be used when the on-disk format changes */
    QED_FEATURE_MASK = QED_F_BACKING_FILE | /* supported feature bits */
                       QED_F_NEED_CHECK |
                       QED_F_BACKING_FORMAT_NO_PROBE,
    QED_COMPAT_FEATURE_MASK = 0,            /* supported compat feature bits */
    QED_AUTOCLEAR_FEATURE_MASK = 0,         /* supported autoclear feature bits */

    /* Data is stored in groups of sectors called clusters.  Cluster size must
     * be large to avoid keeping too much metadata.  I/O requests smaller than
     * the cluster size will require read-modify-write.
     */
    QED_MIN_CLUSTER_SIZE = 4 * 1024, /* in bytes */
    QED_MAX_CLUSTER_SIZE = 64 * 1024 * 1024,

    /* Allocated clusters are tracked using a 2-level pagetable.  Table size is
     * a multiple of clusters so large maximum image sizes can be supported
     * without jacking up the cluster size too much.
     */
    QED_MIN_TABLE_SIZE = 1,        /* in clusters */
    QED_MAX_TABLE_SIZE = 16,
    QED_DEFAULT_TABLE_SIZE = 4,

    /* Delay to flush and clean image after last allocating write completes */
    QED_NEED_CHECK_TIMEOUT = 5,    /* in seconds */
};
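
/* A usable cluster_size is a power of two within the bounds above; the same
 * style of check applies to table_size against its bounds.  A minimal sketch
 * of such a check (hypothetical helper, not part of this header;
 * is_power_of_2() is assumed to come from "qemu/host-utils.h"):
 *
 *   static bool qed_cluster_size_ok(uint32_t cluster_size)
 *   {
 *       return is_power_of_2(cluster_size) &&
 *              cluster_size >= QED_MIN_CLUSTER_SIZE &&
 *              cluster_size <= QED_MAX_CLUSTER_SIZE;
 *   }
 */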

typedef struct {
    uint32_t magic;                 /* QED\0 */

    uint32_t cluster_size;          /* in bytes */
    uint32_t table_size;            /* for L1 and L2 tables, in clusters */
    uint32_t header_size;           /* in clusters */

    uint64_t features;              /* format feature bits */
    uint64_t compat_features;       /* compatible feature bits */
    uint64_t autoclear_features;    /* self-resetting feature bits */

    uint64_t l1_table_offset;       /* in bytes */
    uint64_t image_size;            /* total logical image size, in bytes */

    /* if (features & QED_F_BACKING_FILE) */
    uint32_t backing_filename_offset; /* in bytes from start of header */
    uint32_t backing_filename_size;   /* in bytes */
} QEMU_PACKED QEDHeader;
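
/* Illustrative sketch (not the driver's actual open path): since all on-disk
 * fields are little-endian, a reader must byteswap them before use.
 * le32_to_cpu() and le64_to_cpu() are assumed from "qemu/bswap.h":
 *
 *   QEDHeader le;
 *   memcpy(&le, buf, sizeof(le));    // raw header bytes read from the file
 *   if (le32_to_cpu(le.magic) != QED_MAGIC) {
 *       ...not a QED image...
 *   }
 *   if (le64_to_cpu(le.features) & ~QED_FEATURE_MASK) {
 *       ...unknown feature bit set, refuse to open...
 *   }
 */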

typedef struct {
    uint64_t offsets[0];            /* in bytes */
} QEDTable;

/* The L2 cache is a simple write-through cache for L2 structures */
typedef struct CachedL2Table {
    QEDTable *table;
    uint64_t offset;    /* offset=0 indicates an invalid entry */
    QTAILQ_ENTRY(CachedL2Table) node;
    int ref;
} CachedL2Table;

typedef struct {
    QTAILQ_HEAD(, CachedL2Table) entries;
    unsigned int n_entries;
} L2TableCache;

typedef struct QEDRequest {
    CachedL2Table *l2_table;
} QEDRequest;

enum {
    QED_AIOCB_WRITE = 0x0001,       /* read or write? */
    QED_AIOCB_ZERO  = 0x0002,       /* zero write, used with QED_AIOCB_WRITE */
};

typedef struct QEDAIOCB {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(QEDAIOCB) next;  /* next request */
    int flags;                      /* QED_AIOCB_* bits ORed together */
    uint64_t end_pos;               /* request end on block device, in bytes */

    /* User scatter-gather list */
    QEMUIOVector *qiov;
    size_t qiov_offset;             /* byte count already processed */

    /* Current cluster scatter-gather list */
    QEMUIOVector cur_qiov;
    uint64_t cur_pos;               /* position on block device, in bytes */
    uint64_t cur_cluster;           /* cluster offset in image file */
    unsigned int cur_nclusters;     /* number of clusters being accessed */
    int find_cluster_ret;           /* used for L1/L2 update */

    QEDRequest request;
} QEDAIOCB;

typedef struct {
    BlockDriverState *bs;           /* device */

    /* Written only by an allocating write or the timer handler (the latter
     * while allocating reqs are plugged).
     */
    QEDHeader header;               /* always cpu-endian */

    /* Protected by table_lock.  */
    CoMutex table_lock;
    QEDTable *l1_table;
    L2TableCache l2_cache;          /* l2 table cache */
    uint32_t table_nelems;
    uint32_t l1_shift;
    uint32_t l2_shift;
    uint32_t l2_mask;
    uint64_t file_size;             /* length of image file, in bytes */

    /* Allocating write request queue */
    QEDAIOCB *allocating_acb;
    CoQueue allocating_write_reqs;
    bool allocating_write_reqs_plugged;

    /* Periodic flush and clear need check flag */
    QEMUTimer *need_check_timer;
} BDRVQEDState;

enum {
    QED_CLUSTER_FOUND,         /* cluster found */
    QED_CLUSTER_ZERO,          /* zero cluster found */
    QED_CLUSTER_L2,            /* cluster missing in L2 */
    QED_CLUSTER_L1,            /* cluster missing in L1 */
};

/**
 * Header functions
 */
int qed_write_header_sync(BDRVQEDState *s);

/**
 * L2 cache functions
 */
void qed_init_l2_cache(L2TableCache *l2_cache);
void qed_free_l2_cache(L2TableCache *l2_cache);
CachedL2Table *qed_alloc_l2_cache_entry(L2TableCache *l2_cache);
void qed_unref_l2_cache_entry(CachedL2Table *entry);
CachedL2Table *qed_find_l2_cache_entry(L2TableCache *l2_cache, uint64_t offset);
void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table);
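
/* Typical lookup pattern (illustrative sketch): search the cache for the L2
 * table at a given image-file offset and drop the reference when finished;
 * entries are reference-counted so the cache can evict independently of
 * in-flight users:
 *
 *   CachedL2Table *entry = qed_find_l2_cache_entry(&s->l2_cache, offset);
 *   if (entry) {
 *       ...use entry->table->offsets[i]...
 *       qed_unref_l2_cache_entry(entry);
 *   } else {
 *       ...read the L2 table from disk, then publish it with
 *       qed_commit_l2_cache_entry()...
 *   }
 */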

/**
 * Table I/O functions
 */
int coroutine_fn GRAPH_RDLOCK qed_read_l1_table_sync(BDRVQEDState *s);

int coroutine_fn GRAPH_RDLOCK
qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n);

int coroutine_fn GRAPH_RDLOCK
qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index, unsigned int n);

int coroutine_fn GRAPH_RDLOCK
qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset);

int coroutine_fn GRAPH_RDLOCK
qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset);

int coroutine_fn GRAPH_RDLOCK
qed_write_l2_table(BDRVQEDState *s, QEDRequest *request, unsigned int index,
                   unsigned int n, bool flush);

int coroutine_fn GRAPH_RDLOCK
qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                        unsigned int index, unsigned int n, bool flush);

/**
 * Cluster functions
 */
int coroutine_fn GRAPH_RDLOCK
qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
                 size_t *len, uint64_t *img_offset);
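
/* A caller dispatches on qed_find_cluster()'s QED_CLUSTER_* result, roughly
 * as follows (illustrative sketch, error handling omitted):
 *
 *   ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
 *   switch (ret) {
 *   case QED_CLUSTER_FOUND:  ...read/write data at offset...
 *   case QED_CLUSTER_ZERO:   ...the range reads back as zeroes...
 *   case QED_CLUSTER_L2:
 *   case QED_CLUSTER_L1:     ...unallocated; fall back to the backing file
 *                               or allocate a new cluster...
 *   }
 */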

/**
 * Consistency check
 */
int coroutine_fn GRAPH_RDLOCK
qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix);

QEDTable *qed_alloc_table(BDRVQEDState *s);

/**
 * Round down to the start of a cluster
 */
static inline uint64_t qed_start_of_cluster(BDRVQEDState *s, uint64_t offset)
{
    return offset & ~(uint64_t)(s->header.cluster_size - 1);
}

static inline uint64_t qed_offset_into_cluster(BDRVQEDState *s, uint64_t offset)
{
    return offset & (s->header.cluster_size - 1);
}

static inline uint64_t qed_bytes_to_clusters(BDRVQEDState *s, uint64_t bytes)
{
    return qed_start_of_cluster(s, bytes + (s->header.cluster_size - 1)) /
           (s->header.cluster_size);
}
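
/* For example, with the default 64 KB cluster size:
 * qed_start_of_cluster(s, 70000) == 65536,
 * qed_offset_into_cluster(s, 70000) == 4464, and
 * qed_bytes_to_clusters(s, 70000) == 2 (byte counts round up to whole
 * clusters).
 */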

static inline unsigned int qed_l1_index(BDRVQEDState *s, uint64_t pos)
{
    return pos >> s->l1_shift;
}

static inline unsigned int qed_l2_index(BDRVQEDState *s, uint64_t pos)
{
    return (pos >> s->l2_shift) & s->l2_mask;
}
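
/* Worked example (assuming the default geometry of cluster_size = 65536 and
 * table_size = 4, i.e. 32768 entries per table): the shift values work out
 * to l2_shift = 16, l2_mask = 32767 and l1_shift = 31, so a logical position
 * decomposes as:
 *
 *   l1 index                 = pos >> 31
 *   l2 index                 = (pos >> 16) & 32767
 *   byte offset into cluster = pos & 65535
 */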

/**
 * Test if a cluster offset is valid
 */
static inline bool qed_check_cluster_offset(BDRVQEDState *s, uint64_t offset)
{
    uint64_t header_size = (uint64_t)s->header.header_size *
                           s->header.cluster_size;

    if (offset & (s->header.cluster_size - 1)) {
        return false;
    }
    return offset >= header_size && offset < s->file_size;
}

/**
 * Test if a table offset is valid
 */
static inline bool qed_check_table_offset(BDRVQEDState *s, uint64_t offset)
{
    uint64_t end_offset = offset + (s->header.table_size - 1) *
                          s->header.cluster_size;

    /* Overflow check */
    if (end_offset <= offset) {
        return false;
    }

    return qed_check_cluster_offset(s, offset) &&
           qed_check_cluster_offset(s, end_offset);
}

static inline bool qed_offset_is_cluster_aligned(BDRVQEDState *s,
                                                 uint64_t offset)
{
    if (qed_offset_into_cluster(s, offset)) {
        return false;
    }
    return true;
}

static inline bool qed_offset_is_unalloc_cluster(uint64_t offset)
{
    if (offset == 0) {
        return true;
    }
    return false;
}

static inline bool qed_offset_is_zero_cluster(uint64_t offset)
{
    if (offset == 1) {
        return true;
    }
    return false;
}
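
/* These special values appear as entries in the L1/L2 tables themselves: an
 * offset of 0 marks an unallocated cluster (data comes from the backing
 * file, if any) and an offset of 1 marks a cluster that reads back as all
 * zeroes.  Any other value is a byte offset into the image file and should
 * satisfy qed_check_cluster_offset() above.
 */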

#endif /* BLOCK_QED_H */