/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "qemu/vfio-helpers.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

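/*
 * One submission or completion queue, together with the bus address the
 * controller uses to DMA it. For a submission queue the driver advances
 * 'tail' and writes it to the MMIO tail doorbell; for a completion queue it
 * advances 'head' and writes it to the head doorbell.
 */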
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

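/*
 * One in-flight command slot. 'cid' is the command identifier placed in the
 * submission queue entry; the per-request PRP list page backs transfers that
 * need more than two PRP entries.
 */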
typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes,
                            Error **errp)
{
    ERRP_GUARD();
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}

static void nvme_free_queue(NVMeQueue *q)
{
    qemu_vfree(q->queue);
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    nvme_free_queue(&q->sq);
    nvme_free_queue(&q->cq);
    qemu_vfree(q->prp_list_pages);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (q->free_req_head != -1 &&
           qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry waiting requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    ERRP_GUARD();
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size());
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
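    /*
     * Build the free list through free_req_next indices. CIDs are assigned
     * 1..NVME_NUM_REQS so the completion path can map a CID back to
     * q->reqs[cid - 1]; CID 0 is treated as invalid there.
     */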
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (!q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
{
    NVMeRequest *req;

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;
    return req;
}

/* Return a free request element if any, otherwise return NULL. */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);
    if (q->free_req_head == -1) {
        return NULL;
    }
    return nvme_get_free_req_nofail_locked(q);
}

/*
 * Wait for a free request to become available if necessary, then
 * return it.
 */
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);

    while (q->free_req_head == -1) {
        trace_nvme_free_req_queue_wait(q->s, q->index);
        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
    }

    return nvme_get_free_req_nofail_locked(q);
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1: /* Invalid Command Opcode */
        return -ENOSYS;
    case 2: /* Invalid Field in Command */
        return -EINVAL;
    default:
        return -EIO;
    }
}

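/*
 * Reap completion queue entries. A CQE is new when its phase bit differs
 * from q->cq_phase; the controller inverts the phase it writes on every pass
 * through the queue, and the driver flips q->cq_phase whenever cq.head wraps.
 */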
/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_NUM_REQS) {
            warn_report("NVMe: Unexpected CID in completion queue: %" PRIu32
                        ", should be within: 1..%u inclusively", cid,
                        NVME_NUM_REQS);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

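/*
 * Deferred work scheduled by nvme_submit_command(): ring the submission
 * queue doorbell for everything queued so far and reap any completions.
 * Going through defer_call() lets several back-to-back submissions share a
 * single doorbell write.
 */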
static void nvme_deferred_fn(void *opaque)
{
    NVMeQueuePair *q = opaque;

    QEMU_LOCK_GUARD(&q->lock);
    nvme_kick(q);
    nvme_process_completion(q);
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    qemu_mutex_unlock(&q->lock);

    defer_call(nvme_deferred_fn, q);
}

static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req_nowait(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}

/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    ERRP_GUARD();
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());

    id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
                NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);

    return ret;
}

static void nvme_poll_queue(NVMeQueuePair *q)
{
    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
    }
    qemu_mutex_unlock(&q->lock);
}

static void nvme_poll_queues(BDRVNVMeState *s)
{
    int i;

    for (i = 0; i < s->queue_count; i++) {
        nvme_poll_queue(s->queues[i]);
    }
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

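/*
 * AioContext poll handler: returns true if any completion queue has a new
 * entry (detected via the phase bit), so the event loop can process it
 * without waiting for the MSI-X eventfd to fire.
 */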
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
    int i;

    for (i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * q->lock isn't needed because nvme_process_completion() only runs in
         * the event loop thread and cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
            return true;
        }
    }
    return false;
}

static void nvme_poll_ready(EventNotifier *e)
{
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform the initialization sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     1 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
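    /* CAP.DSTRD encodes the doorbell stride as 2^(2 + DSTRD) bytes; store it
     * in uint32_t units for indexing the doorbell array. */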
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    ver = le32_to_cpu(regs->vs);
    trace_nvme_controller_spec_version(extract32(ver, 16, 16),
                                       extract32(ver, 8, 8),
                                       extract32(ver, 0, 8));

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
                                           sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
                                           PROT_WRITE, errp);
    s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
    if (!q) {
        ret = -EINVAL;
        goto out;
    }
    s->queues[INDEX_ADMIN] = q;
    s->queue_count = 1;
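    /* Program the admin queue: AQA holds the 0's-based admin SQ/CQ sizes,
     * ASQ/ACQ the base addresses of the admin submission/completion queues. */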
    QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
    regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
                            ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(q->sq.iova);
    regs->acq = cpu_to_le64(q->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           nvme_handle_event, nvme_poll_cb,
                           nvme_poll_ready);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_admin_cmd_sync(bs, &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           NULL, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                            0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t coroutine_fn nvme_co_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;
    Error *local_err = NULL, **errp = NULL;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
                                   qemu_real_host_page_size());
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              len, true, &iova, errp);
        if (r == -ENOSPC) {
            /*
             * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
             * ioctl returns -ENOSPC to signal the user exhausted the DMA
             * mappings available for a container since Linux kernel commit
             * 492855939bdb ("vfio/type1: Limit DMA mappings per container",
             * April 2019, see CVE-2019-3882).
             *
             * This block driver already handles this error path by checking
             * for the -ENOMEM error, so we directly replace -ENOSPC by
             * -ENOMEM. Beside, -ENOSPC has a specific meaning for blockdev
             * coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and
             * BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator
             * to add more storage to the blockdev. Not something we can do
             * easily with an IOMMU :)
             */
            r = -ENOMEM;
        }
        if (r == -ENOMEM && retry) {
            /*
             * We exhausted the DMA mappings available for our container:
             * recycle the volatile IOVA mappings.
             */
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            errp = &local_err;

            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
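    /*
     * Fill in the PRP entries: prp1 always holds the first data page. With
     * exactly two pages, prp2 holds the second page directly; with more,
     * prp2 points at the PRP list starting at its second entry (the first
     * entry is already in prp1).
     */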
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    if (local_err) {
        error_reportf_err(local_err, "Cannot map buffer for DMA: ");
    }
    return r;
}

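/*
 * Per-request bridge between the NVMe completion callback and the submitting
 * coroutine: nvme_rw_cb() stores the result and, if the coroutine has already
 * yielded, schedules a BH in its AioContext to re-enter it.
 */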
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

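    /*
     * CDW10/11 carry the 64-bit starting LBA; CDW12 bits 15:0 are the number
     * of logical blocks minus one, and bit 30 is the Force Unit Access flag.
     */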
1194 uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
Fam Zhengbdd6a902018-01-16 14:08:55 +08001195 (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
1196 NvmeCmd cmd = {
1197 .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
1198 .nsid = cpu_to_le32(s->nsid),
Maxim Levitsky118d1b62019-07-16 19:30:19 +03001199 .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
1200 .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
Fam Zhengbdd6a902018-01-16 14:08:55 +08001201 .cdw12 = cpu_to_le32(cdw12),
1202 };
1203 NVMeCoData data = {
1204 .ctx = bdrv_get_aio_context(bs),
1205 .ret = -EINPROGRESS,
1206 };
1207
1208 trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
Philippe Mathieu-Daudé1b539bd2020-10-29 10:32:50 +01001209 assert(s->queue_count > 1);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001210 req = nvme_get_free_req(ioq);
1211 assert(req);
1212
1213 qemu_co_mutex_lock(&s->dma_map_lock);
1214 r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
1215 qemu_co_mutex_unlock(&s->dma_map_lock);
1216 if (r) {
Stefan Hajnoczib75fd5f2020-06-17 14:22:00 +01001217 nvme_put_free_req_and_wake(ioq, req);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001218 return r;
1219 }
Stefan Hajnoczib75fd5f2020-06-17 14:22:00 +01001220 nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001221
1222 data.co = qemu_coroutine_self();
1223 while (data.ret == -EINPROGRESS) {
1224 qemu_coroutine_yield();
1225 }
1226
1227 qemu_co_mutex_lock(&s->dma_map_lock);
1228 r = nvme_cmd_unmap_qiov(bs, qiov);
1229 qemu_co_mutex_unlock(&s->dma_map_lock);
1230 if (r) {
1231 return r;
1232 }
1233
1234 trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
1235 return data.ret;
1236}
1237
1238static inline bool nvme_qiov_aligned(BlockDriverState *bs,
1239 const QEMUIOVector *qiov)
1240{
1241 int i;
1242 BDRVNVMeState *s = bs->opaque;
1243
1244 for (i = 0; i < qiov->niov; ++i) {
Eric Auger9e13d592020-10-29 10:33:03 +01001245 if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
Marc-André Lureau8e3b0cb2022-03-23 19:57:22 +04001246 qemu_real_host_page_size()) ||
1247 !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
Fam Zhengbdd6a902018-01-16 14:08:55 +08001248 trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
1249 qiov->iov[i].iov_len, s->page_size);
1250 return false;
1251 }
1252 }
1253 return true;
1254}
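/*
 * Explanatory note (not part of the original driver source): the check is
 * against the host page size because buffers are DMA-mapped through VFIO,
 * which works on whole host pages.  For example, on a 4 KiB-page host an
 * iovec entry with an odd base such as ...1200 or a 512-byte length fails
 * the test and forces nvme_co_prw() below onto the bounce-buffer path,
 * counted in stats.unaligned_accesses.
 */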
1255
Paolo Bonzini711b12e2022-09-22 10:49:10 +02001256static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
1257 uint64_t offset, uint64_t bytes,
1258 QEMUIOVector *qiov, bool is_write,
1259 int flags)
Fam Zhengbdd6a902018-01-16 14:08:55 +08001260{
1261 BDRVNVMeState *s = bs->opaque;
1262 int r;
Philippe Mathieu-Daudé4a613bd2021-10-06 18:49:27 +02001263 QEMU_AUTO_VFREE uint8_t *buf = NULL;
Fam Zhengbdd6a902018-01-16 14:08:55 +08001264 QEMUIOVector local_qiov;
Marc-André Lureau8e3b0cb2022-03-23 19:57:22 +04001265 size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
Fam Zhengbdd6a902018-01-16 14:08:55 +08001266 assert(QEMU_IS_ALIGNED(offset, s->page_size));
1267 assert(QEMU_IS_ALIGNED(bytes, s->page_size));
1268 assert(bytes <= s->max_transfer);
1269 if (nvme_qiov_aligned(bs, qiov)) {
Philippe Mathieu-Daudéf25e7ab2020-10-01 18:29:39 +02001270 s->stats.aligned_accesses++;
Fam Zhengbdd6a902018-01-16 14:08:55 +08001271 return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
1272 }
Philippe Mathieu-Daudéf25e7ab2020-10-01 18:29:39 +02001273 s->stats.unaligned_accesses++;
Fam Zhengbdd6a902018-01-16 14:08:55 +08001274 trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
Marc-André Lureau8e3b0cb2022-03-23 19:57:22 +04001275 buf = qemu_try_memalign(qemu_real_host_page_size(), len);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001276
1277 if (!buf) {
1278 return -ENOMEM;
1279 }
1280 qemu_iovec_init(&local_qiov, 1);
1281 if (is_write) {
1282 qemu_iovec_to_buf(qiov, 0, buf, bytes);
1283 }
1284 qemu_iovec_add(&local_qiov, buf, bytes);
1285 r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
1286 qemu_iovec_destroy(&local_qiov);
1287 if (!r && !is_write) {
1288 qemu_iovec_from_buf(qiov, 0, buf, bytes);
1289 }
Fam Zhengbdd6a902018-01-16 14:08:55 +08001290 return r;
1291}
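/*
 * Note on the bounce-buffer path above (explanatory, not part of the
 * original source): unaligned requests are staged through one page-aligned
 * allocation; writes copy the data in before submission, reads copy it out
 * only after the command succeeded.  QEMU_AUTO_VFREE frees the buffer
 * automatically on every return path.
 */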
1292
1293static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
Vladimir Sementsov-Ogievskiyf7ef38d2021-09-03 13:27:59 +03001294 int64_t offset, int64_t bytes,
1295 QEMUIOVector *qiov,
1296 BdrvRequestFlags flags)
Fam Zhengbdd6a902018-01-16 14:08:55 +08001297{
1298 return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
1299}
1300
1301static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
Vladimir Sementsov-Ogievskiye75abed2021-09-03 13:28:00 +03001302 int64_t offset, int64_t bytes,
1303 QEMUIOVector *qiov,
1304 BdrvRequestFlags flags)
Fam Zhengbdd6a902018-01-16 14:08:55 +08001305{
1306 return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
1307}
1308
1309static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
1310{
1311 BDRVNVMeState *s = bs->opaque;
Philippe Mathieu-Daudé73159e52020-08-21 21:53:48 +02001312 NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
Fam Zhengbdd6a902018-01-16 14:08:55 +08001313 NVMeRequest *req;
1314 NvmeCmd cmd = {
1315 .opcode = NVME_CMD_FLUSH,
1316 .nsid = cpu_to_le32(s->nsid),
1317 };
1318 NVMeCoData data = {
1319 .ctx = bdrv_get_aio_context(bs),
1320 .ret = -EINPROGRESS,
1321 };
1322
Philippe Mathieu-Daudé1b539bd2020-10-29 10:32:50 +01001323 assert(s->queue_count > 1);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001324 req = nvme_get_free_req(ioq);
1325 assert(req);
Stefan Hajnoczib75fd5f2020-06-17 14:22:00 +01001326 nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001327
1328 data.co = qemu_coroutine_self();
1329 if (data.ret == -EINPROGRESS) {
1330 qemu_coroutine_yield();
1331 }
1332
1333 return data.ret;
1334}
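/*
 * Explanatory note: NVME_CMD_FLUSH carries no data, so unlike the read/write
 * path there is nothing to map or unmap here; the coroutine only submits the
 * command and waits for nvme_rw_cb() to deliver the completion status.
 */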
1335
1336
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001337static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
1338 int64_t offset,
Vladimir Sementsov-Ogievskiyf34b2bc2021-09-03 13:28:03 +03001339 int64_t bytes,
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001340 BdrvRequestFlags flags)
1341{
1342 BDRVNVMeState *s = bs->opaque;
Philippe Mathieu-Daudé73159e52020-08-21 21:53:48 +02001343 NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001344 NVMeRequest *req;
Vladimir Sementsov-Ogievskiyf34b2bc2021-09-03 13:28:03 +03001345 uint32_t cdw12;
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001346
1347 if (!s->supports_write_zeroes) {
1348 return -ENOTSUP;
1349 }
1350
Vladimir Sementsov-Ogievskiyf34b2bc2021-09-03 13:28:03 +03001351 if (bytes == 0) {
1352 return 0;
1353 }
1354
1355 cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
1356 /*
1357     * We must not lose information here; pwrite_zeroes_alignment and
1358     * max_pwrite_zeroes guarantee it.
1359 */
1360 assert(((cdw12 + 1) << s->blkshift) == bytes);
1361
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001362 NvmeCmd cmd = {
Klaus Jensen69265152020-03-30 23:10:13 +02001363 .opcode = NVME_CMD_WRITE_ZEROES,
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001364 .nsid = cpu_to_le32(s->nsid),
1365 .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
1366 .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
1367 };
1368
1369 NVMeCoData data = {
1370 .ctx = bdrv_get_aio_context(bs),
1371 .ret = -EINPROGRESS,
1372 };
1373
1374 if (flags & BDRV_REQ_MAY_UNMAP) {
1375 cdw12 |= (1 << 25);
1376 }
1377
1378 if (flags & BDRV_REQ_FUA) {
1379 cdw12 |= (1 << 30);
1380 }
1381
1382 cmd.cdw12 = cpu_to_le32(cdw12);
1383
1384 trace_nvme_write_zeroes(s, offset, bytes, flags);
Philippe Mathieu-Daudé1b539bd2020-10-29 10:32:50 +01001385 assert(s->queue_count > 1);
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001386 req = nvme_get_free_req(ioq);
1387 assert(req);
1388
Stefan Hajnoczib75fd5f2020-06-17 14:22:00 +01001389 nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001390
1391 data.co = qemu_coroutine_self();
1392 while (data.ret == -EINPROGRESS) {
1393 qemu_coroutine_yield();
1394 }
1395
1396 trace_nvme_rw_done(s, true, offset, bytes, data.ret);
1397 return data.ret;
1398}
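/*
 * Summary of the cdw12 layout used above for Write Zeroes (reader's note,
 * values restated from the code):
 *
 *   bits 15:0  number of blocks minus one ((bytes >> blkshift) - 1)
 *   bit  25    deallocate, set for BDRV_REQ_MAY_UNMAP
 *   bit  30    force unit access, set for BDRV_REQ_FUA
 *
 * Illustrative example: zeroing 64 KiB on a 512-byte-block namespace
 * (blkshift == 9) with BDRV_REQ_MAY_UNMAP gives cdw12 = (1 << 25) | 127.
 */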
1399
1400
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001401static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
1402 int64_t offset,
Vladimir Sementsov-Ogievskiy0c802282021-09-03 13:28:06 +03001403 int64_t bytes)
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001404{
1405 BDRVNVMeState *s = bs->opaque;
Philippe Mathieu-Daudé73159e52020-08-21 21:53:48 +02001406 NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001407 NVMeRequest *req;
Philippe Mathieu-Daudé4a613bd2021-10-06 18:49:27 +02001408 QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001409 QEMUIOVector local_qiov;
1410 int ret;
1411
1412 NvmeCmd cmd = {
1413 .opcode = NVME_CMD_DSM,
1414 .nsid = cpu_to_le32(s->nsid),
1415        .cdw10 = cpu_to_le32(0), /* number of ranges - 0 based */
1416        .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
1417 };
1418
1419 NVMeCoData data = {
1420 .ctx = bdrv_get_aio_context(bs),
1421 .ret = -EINPROGRESS,
1422 };
1423
1424 if (!s->supports_discard) {
1425 return -ENOTSUP;
1426 }
1427
Philippe Mathieu-Daudé1b539bd2020-10-29 10:32:50 +01001428 assert(s->queue_count > 1);
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001429
Vladimir Sementsov-Ogievskiy0c802282021-09-03 13:28:06 +03001430 /*
1431 * Filling the @buf requires @offset and @bytes to satisfy restrictions
1432 * defined in nvme_refresh_limits().
1433 */
1434 assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
1435 assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
1436 assert((bytes >> s->blkshift) <= UINT32_MAX);
1437
Philippe Mathieu-Daudé38e1f812020-08-21 21:53:54 +02001438 buf = qemu_try_memalign(s->page_size, s->page_size);
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001439 if (!buf) {
1440 return -ENOMEM;
1441 }
Philippe Mathieu-Daudé2ed84692020-08-21 21:53:53 +02001442 memset(buf, 0, s->page_size);
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001443 buf->nlb = cpu_to_le32(bytes >> s->blkshift);
1444 buf->slba = cpu_to_le64(offset >> s->blkshift);
1445 buf->cattr = 0;
1446
1447 qemu_iovec_init(&local_qiov, 1);
1448 qemu_iovec_add(&local_qiov, buf, 4096);
1449
1450 req = nvme_get_free_req(ioq);
1451 assert(req);
1452
1453 qemu_co_mutex_lock(&s->dma_map_lock);
1454 ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
1455 qemu_co_mutex_unlock(&s->dma_map_lock);
1456
1457 if (ret) {
Stefan Hajnoczib75fd5f2020-06-17 14:22:00 +01001458 nvme_put_free_req_and_wake(ioq, req);
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001459 goto out;
1460 }
1461
1462 trace_nvme_dsm(s, offset, bytes);
1463
Stefan Hajnoczib75fd5f2020-06-17 14:22:00 +01001464 nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001465
1466 data.co = qemu_coroutine_self();
1467 while (data.ret == -EINPROGRESS) {
1468 qemu_coroutine_yield();
1469 }
1470
1471 qemu_co_mutex_lock(&s->dma_map_lock);
1472 ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
1473 qemu_co_mutex_unlock(&s->dma_map_lock);
1474
1475 if (ret) {
1476 goto out;
1477 }
1478
1479 ret = data.ret;
1480 trace_nvme_dsm_done(s, offset, bytes, ret);
1481out:
1482 qemu_iovec_destroy(&local_qiov);
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001483 return ret;
1485}
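/*
 * Worked example for the DSM deallocate above (illustrative, not from the
 * original source): discarding 1 MiB at offset 2 MiB on a 4 KiB-block
 * namespace (blkshift == 12) fills the single NvmeDsmRange as
 *
 *   buf->slba = 2 MiB >> 12 = 512
 *   buf->nlb  = 1 MiB >> 12 = 256
 *
 * cdw10 = 0 means "one range" (the field is zero-based) and cdw11 bit 2
 * selects the deallocate attribute, as the comments in the command setup say.
 */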
1486
Philippe Mathieu-Daudéc8807c52020-12-10 13:52:02 +01001487static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
1488 bool exact, PreallocMode prealloc,
1489 BdrvRequestFlags flags, Error **errp)
1490{
1491 int64_t cur_length;
1492
1493 if (prealloc != PREALLOC_MODE_OFF) {
1494 error_setg(errp, "Unsupported preallocation mode '%s'",
1495 PreallocMode_str(prealloc));
1496 return -ENOTSUP;
1497 }
1498
Emanuele Giuseppe Espositoc86422c2023-01-13 21:42:04 +01001499 cur_length = nvme_co_getlength(bs);
Philippe Mathieu-Daudéc8807c52020-12-10 13:52:02 +01001500 if (offset != cur_length && exact) {
1501 error_setg(errp, "Cannot resize NVMe devices");
1502 return -ENOTSUP;
1503 } else if (offset > cur_length) {
1504 error_setg(errp, "Cannot grow NVMe devices");
1505 return -EINVAL;
1506 }
1507
1508 return 0;
1509}
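/*
 * Explanatory note: an NVMe namespace cannot be resized from here, so
 * truncate only succeeds as a no-op.  Growing always fails, and shrinking
 * fails only when the caller demanded an exact size; an inexact shrink is
 * accepted without changing the device.
 */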
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001510
Fam Zhengbdd6a902018-01-16 14:08:55 +08001511static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
1512 BlockReopenQueue *queue, Error **errp)
1513{
1514 return 0;
1515}
1516
Max Reitz998b3a12019-02-01 20:29:28 +01001517static void nvme_refresh_filename(BlockDriverState *bs)
Fam Zhengbdd6a902018-01-16 14:08:55 +08001518{
Max Reitzcc61b072019-02-01 20:29:30 +01001519 BDRVNVMeState *s = bs->opaque;
Fam Zhengbdd6a902018-01-16 14:08:55 +08001520
Max Reitzcc61b072019-02-01 20:29:30 +01001521 snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
1522 s->device, s->nsid);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001523}
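/*
 * Illustrative example: for device "0000:01:00.0" and namespace 1 the format
 * string above produces "nvme://0000:01:00.0/1", the form that
 * nvme_parse_filename() is expected to parse back.
 */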
1524
1525static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
1526{
1527 BDRVNVMeState *s = bs->opaque;
1528
1529 bs->bl.opt_mem_alignment = s->page_size;
1530 bs->bl.request_alignment = s->page_size;
1531 bs->bl.max_transfer = s->max_transfer;
Vladimir Sementsov-Ogievskiyf34b2bc2021-09-03 13:28:03 +03001532
1533 /*
1534     * See nvme_co_pwrite_zeroes(): after the shift and decrement, the block
1535     * count must fit in 16 bits, i.e. be at most 0xFFFF.
1536 */
1537 bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
1538 bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
1539 1UL << s->blkshift);
Vladimir Sementsov-Ogievskiy0c802282021-09-03 13:28:06 +03001540
1541 bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
1542 bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
1543 1UL << s->blkshift);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001544}
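/*
 * Worked example for the limits above (illustrative numbers): with 512-byte
 * blocks (blkshift == 9),
 *
 *   max_pwrite_zeroes = 1 << (9 + 16) = 32 MiB, so
 *     (32 MiB >> 9) - 1 = 0xFFFF, the largest value the 16-bit block count
 *     in cdw12 can hold;
 *   max_pdiscard = UINT32_MAX << 9, just under 2 TiB, so bytes >> blkshift
 *     in nvme_co_pdiscard() always fits the 32-bit NvmeDsmRange.nlb field.
 */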
1545
1546static void nvme_detach_aio_context(BlockDriverState *bs)
1547{
1548 BDRVNVMeState *s = bs->opaque;
1549
Philippe Mathieu-Daudé1b539bd2020-10-29 10:32:50 +01001550 for (unsigned i = 0; i < s->queue_count; i++) {
Stefan Hajnoczi7838c672020-06-17 14:22:01 +01001551 NVMeQueuePair *q = s->queues[i];
1552
1553 qemu_bh_delete(q->completion_bh);
1554 q->completion_bh = NULL;
1555 }
1556
Philippe Mathieu-Daudéb111b3f2020-08-21 21:53:59 +02001557 aio_set_event_notifier(bdrv_get_aio_context(bs),
1558 &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
Stefan Hajnoczi60f782b2023-05-16 15:02:38 -04001559 NULL, NULL, NULL);
Fam Zhengbdd6a902018-01-16 14:08:55 +08001560}
1561
1562static void nvme_attach_aio_context(BlockDriverState *bs,
1563 AioContext *new_context)
1564{
1565 BDRVNVMeState *s = bs->opaque;
1566
1567 s->aio_context = new_context;
Philippe Mathieu-Daudéb111b3f2020-08-21 21:53:59 +02001568 aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
Stefan Hajnoczi60f782b2023-05-16 15:02:38 -04001569 nvme_handle_event, nvme_poll_cb,
Stefan Hajnoczi826cc322021-12-07 13:23:31 +00001570 nvme_poll_ready);
Stefan Hajnoczi7838c672020-06-17 14:22:01 +01001571
Philippe Mathieu-Daudé1b539bd2020-10-29 10:32:50 +01001572 for (unsigned i = 0; i < s->queue_count; i++) {
Stefan Hajnoczi7838c672020-06-17 14:22:01 +01001573 NVMeQueuePair *q = s->queues[i];
1574
1575 q->completion_bh =
1576 aio_bh_new(new_context, nvme_process_completion_bh, q);
1577 }
Fam Zhengbdd6a902018-01-16 14:08:55 +08001578}
1579
Stefan Hajnoczif4ec04b2022-10-13 14:59:02 -04001580static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
1581 Error **errp)
Fam Zheng9ed61612018-01-16 14:08:57 +08001582{
1583 int ret;
1584 BDRVNVMeState *s = bs->opaque;
1585
Stefan Hajnoczif4ec04b2022-10-13 14:59:02 -04001586 /*
1587 * FIXME: we may run out of IOVA addresses after repeated
1588 * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
1589 * doesn't reclaim addresses for fixed mappings.
1590 */
1591 ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp);
1592 return ret == 0;
Fam Zheng9ed61612018-01-16 14:08:57 +08001593}
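/*
 * Explanatory note: registering a buffer creates a long-lived IOVA mapping,
 * so later I/O to that memory can reuse the mapping instead of mapping and
 * unmapping on every request.  Callers typically reach this through the
 * generic bdrv_register_buf()/bdrv_unregister_buf() hooks; as the FIXME
 * above notes, repeated register/unregister cycles can exhaust IOVA space
 * because fixed mappings are not reclaimed.
 */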
1594
Stefan Hajnoczi4f384012022-10-13 14:58:59 -04001595static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size)
Fam Zheng9ed61612018-01-16 14:08:57 +08001596{
1597 BDRVNVMeState *s = bs->opaque;
1598
1599 qemu_vfio_dma_unmap(s->vfio, host);
1600}
1601
Philippe Mathieu-Daudéf25e7ab2020-10-01 18:29:39 +02001602static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
1603{
1604 BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
1605 BDRVNVMeState *s = bs->opaque;
1606
1607 stats->driver = BLOCKDEV_DRIVER_NVME;
1608 stats->u.nvme = (BlockStatsSpecificNvme) {
1609 .completion_errors = s->stats.completion_errors,
1610 .aligned_accesses = s->stats.aligned_accesses,
1611 .unaligned_accesses = s->stats.unaligned_accesses,
1612 };
1613
1614 return stats;
1615}
1616
Max Reitz26542672019-02-01 20:29:25 +01001617static const char *const nvme_strong_runtime_opts[] = {
1618 NVME_BLOCK_OPT_DEVICE,
1619 NVME_BLOCK_OPT_NAMESPACE,
1620
1621 NULL
1622};
1623
Fam Zhengbdd6a902018-01-16 14:08:55 +08001624static BlockDriver bdrv_nvme = {
1625 .format_name = "nvme",
1626 .protocol_name = "nvme",
1627 .instance_size = sizeof(BDRVNVMeState),
1628
Maxim Levitsky5a5e7f82020-03-26 03:12:18 +02001629 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
1630 .create_opts = &bdrv_create_opts_simple,
1631
Fam Zhengbdd6a902018-01-16 14:08:55 +08001632 .bdrv_parse_filename = nvme_parse_filename,
Paolo Bonzinid656aaa2023-09-04 12:07:19 +02001633 .bdrv_open = nvme_open,
Fam Zhengbdd6a902018-01-16 14:08:55 +08001634 .bdrv_close = nvme_close,
Emanuele Giuseppe Espositoc86422c2023-01-13 21:42:04 +01001635 .bdrv_co_getlength = nvme_co_getlength,
Maxim Levitsky118d1b62019-07-16 19:30:19 +03001636 .bdrv_probe_blocksizes = nvme_probe_blocksizes,
Philippe Mathieu-Daudéc8807c52020-12-10 13:52:02 +01001637 .bdrv_co_truncate = nvme_co_truncate,
Fam Zhengbdd6a902018-01-16 14:08:55 +08001638
1639 .bdrv_co_preadv = nvme_co_preadv,
1640 .bdrv_co_pwritev = nvme_co_pwritev,
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001641
1642 .bdrv_co_pwrite_zeroes = nvme_co_pwrite_zeroes,
Maxim Levitskye87a09d2019-09-13 16:36:27 +03001643 .bdrv_co_pdiscard = nvme_co_pdiscard,
Maxim Levitskye0dd95e2019-09-13 16:36:26 +03001644
Fam Zhengbdd6a902018-01-16 14:08:55 +08001645 .bdrv_co_flush_to_disk = nvme_co_flush,
1646 .bdrv_reopen_prepare = nvme_reopen_prepare,
1647
Fam Zhengbdd6a902018-01-16 14:08:55 +08001648 .bdrv_refresh_filename = nvme_refresh_filename,
1649 .bdrv_refresh_limits = nvme_refresh_limits,
Max Reitz26542672019-02-01 20:29:25 +01001650 .strong_runtime_opts = nvme_strong_runtime_opts,
Philippe Mathieu-Daudéf25e7ab2020-10-01 18:29:39 +02001651 .bdrv_get_specific_stats = nvme_get_specific_stats,
Fam Zhengbdd6a902018-01-16 14:08:55 +08001652
1653 .bdrv_detach_aio_context = nvme_detach_aio_context,
1654 .bdrv_attach_aio_context = nvme_attach_aio_context,
1655
Fam Zheng9ed61612018-01-16 14:08:57 +08001656 .bdrv_register_buf = nvme_register_buf,
1657 .bdrv_unregister_buf = nvme_unregister_buf,
Fam Zhengbdd6a902018-01-16 14:08:55 +08001658};
1659
1660static void bdrv_nvme_init(void)
1661{
1662 bdrv_register(&bdrv_nvme);
1663}
1664
1665block_init(bdrv_nvme_init);
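/*
 * Usage sketch (not part of the original file; exact option names should be
 * checked against nvme_parse_filename()/nvme_open() earlier in this file).
 * The driver is addressed by PCI address and namespace id, e.g.
 *
 *   qemu-system-x86_64 \
 *       -drive file=nvme://0000:01:00.0/1,if=none,id=drive0 \
 *       -device virtio-blk-pci,drive=drive0
 *
 * or, spelled out as a blockdev,
 *
 *   -blockdev driver=nvme,node-name=drive0,device=0000:01:00.0,namespace=1
 *
 * The controller must be bound to vfio-pci on the host first; the guest then
 * sees an ordinary block device backed directly by the NVMe drive.
 */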