/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "qemu/vfio-helpers.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN 0
#define INDEX_IO(n) (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct {
    int32_t head, tail;
    uint8_t *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState *s;
    int index;

    /* Fields protected by BQL */
    uint8_t *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue free_req_queue;
    NVMeQueue sq, cq;
    int cq_phase;
    int free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int need_kick;
    int inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH *completion_bh;
} NVMeQueuePair;

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    ERRP_GUARD();
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}

static void nvme_free_queue(NVMeQueue *q)
{
    qemu_vfree(q->queue);
}

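/* Free a queue pair, including its completion BH, SQ/CQ rings and PRP pages */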
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    nvme_free_queue(&q->sq);
    nvme_free_queue(&q->cq);
    qemu_vfree(q->prp_list_pages);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

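/*
 * Scheduled by nvme_wake_free_req_locked(): wake up coroutines waiting for
 * a free request slot.
 */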
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (q->free_req_head != -1 &&
           qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry waiting requests */
    }
    qemu_mutex_unlock(&q->lock);
}

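/*
 * Allocate a queue pair: the request slots and their PRP list pages are
 * DMA-mapped, and the SQ/CQ doorbell pointers are taken from the mapped
 * doorbell region. Returns NULL on failure with @errp set.
 */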
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    ERRP_GUARD();
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size());
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

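/* Ring the SQ tail doorbell for commands queued since the last kick */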
/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (!q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
{
    NVMeRequest *req;

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;
    return req;
}

/* Return a free request element if any, otherwise return NULL. */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);
    if (q->free_req_head == -1) {
        return NULL;
    }
    return nvme_get_free_req_nofail_locked(q);
}

/*
 * Wait for a free request to become available if necessary, then
 * return it.
 */
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);

    while (q->free_req_head == -1) {
        trace_nvme_free_req_queue_wait(q->s, q->index);
        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
    }

    return nvme_get_free_req_nofail_locked(q);
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

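/* Convert an NVMe completion status code into a negative errno value */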
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}

/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_NUM_REQS) {
            warn_report("NVMe: Unexpected CID in completion queue: %" PRIu32
                        ", should be within: 1..%u inclusively", cid,
                        NVME_NUM_REQS);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

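/* Emit the raw 64-byte command as trace events, 8 bytes at a time */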
static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

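/* defer_call() callback: ring the doorbell and reap any completions */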
static void nvme_deferred_fn(void *opaque)
{
    NVMeQueuePair *q = opaque;

    QEMU_LOCK_GUARD(&q->lock);
    nvme_kick(q);
    nvme_process_completion(q);
}

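/*
 * Copy @cmd into the submission queue and defer the doorbell write via
 * defer_call(). @cb is invoked with the translated errno once the command
 * completes.
 */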
static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    qemu_mutex_unlock(&q->lock);

    defer_call(nvme_deferred_fn, q);
}

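/*
 * Completion callback for nvme_admin_cmd_sync(): store the result and wake
 * up the AIO_WAIT_WHILE() loop.
 */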
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

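/* Send a command on the admin queue and wait for it to complete */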
static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req_nowait(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}

/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    ERRP_GUARD();
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());

    id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);

    return ret;
}

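/* Check one completion queue and process any new entries */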
static void nvme_poll_queue(NVMeQueuePair *q)
{
    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
    }
    qemu_mutex_unlock(&q->lock);
}

static void nvme_poll_queues(BDRVNVMeState *s)
{
    int i;

    for (i = 0; i < s->queue_count; i++) {
        nvme_poll_queue(s->queues[i]);
    }
}

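/* IRQ handler: clear the event notifier and poll all queues for completions */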
static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

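/*
 * Create another I/O queue pair and register it with the controller via the
 * CREATE_CQ and CREATE_SQ admin commands. Returns true on success.
 */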
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

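/* AioContext poll handler: true if any completion queue has a new entry */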
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
    int i;

    for (i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * q->lock isn't needed because nvme_process_completion() only runs in
         * the event loop thread and cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
            return true;
        }
    }
    return false;
}

static void nvme_poll_ready(EventNotifier *e)
{
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    nvme_poll_queues(s);
}

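/*
 * Open the PCI device with VFIO, reset and enable the controller, set up the
 * admin queue, identify the namespace and create the first I/O queue pair.
 */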
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform the initialization sequence as described in NVMe spec
     * "7.6.1 Initialization". */
| 781 | |
Philippe Mathieu-Daudé | 9406e0d | 2020-09-22 10:38:19 +0200 | [diff] [blame] | 782 | cap = le64_to_cpu(regs->cap); |
Philippe Mathieu-Daudé | 15b2260 | 2020-10-29 10:32:45 +0100 | [diff] [blame] | 783 | trace_nvme_controller_capability_raw(cap); |
| 784 | trace_nvme_controller_capability("Maximum Queue Entries Supported", |
| 785 | 1 + NVME_CAP_MQES(cap)); |
| 786 | trace_nvme_controller_capability("Contiguous Queues Required", |
| 787 | NVME_CAP_CQR(cap)); |
| 788 | trace_nvme_controller_capability("Doorbell Stride", |
Philippe Mathieu-Daudé | 97b709f | 2021-01-27 22:21:36 +0100 | [diff] [blame] | 789 | 1 << (2 + NVME_CAP_DSTRD(cap))); |
Philippe Mathieu-Daudé | 15b2260 | 2020-10-29 10:32:45 +0100 | [diff] [blame] | 790 | trace_nvme_controller_capability("Subsystem Reset Supported", |
| 791 | NVME_CAP_NSSRS(cap)); |
| 792 | trace_nvme_controller_capability("Memory Page Size Minimum", |
| 793 | 1 << (12 + NVME_CAP_MPSMIN(cap))); |
| 794 | trace_nvme_controller_capability("Memory Page Size Maximum", |
| 795 | 1 << (12 + NVME_CAP_MPSMAX(cap))); |
Philippe Mathieu-Daudé | fad1eb6 | 2020-09-22 10:38:20 +0200 | [diff] [blame] | 796 | if (!NVME_CAP_CSS(cap)) { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 797 | error_setg(errp, "Device doesn't support NVMe command set"); |
| 798 | ret = -EINVAL; |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 799 | goto out; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 800 | } |
| 801 | |
Philippe Mathieu-Daudé | a652a3e | 2020-10-29 10:32:59 +0100 | [diff] [blame] | 802 | s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap)); |
Philippe Mathieu-Daudé | fad1eb6 | 2020-09-22 10:38:20 +0200 | [diff] [blame] | 803 | s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 804 | bs->bl.opt_mem_alignment = s->page_size; |
Philippe Mathieu-Daudé | c8228ac | 2020-10-29 10:32:58 +0100 | [diff] [blame] | 805 | bs->bl.request_alignment = s->page_size; |
Philippe Mathieu-Daudé | fad1eb6 | 2020-09-22 10:38:20 +0200 | [diff] [blame] | 806 | timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 807 | |
Philippe Mathieu-Daudé | fcc8672 | 2021-01-27 22:21:37 +0100 | [diff] [blame] | 808 | ver = le32_to_cpu(regs->vs); |
| 809 | trace_nvme_controller_spec_version(extract32(ver, 16, 16), |
| 810 | extract32(ver, 8, 8), |
| 811 | extract32(ver, 0, 8)); |
| 812 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 813 | /* Reset device to get a clean state. */ |
Philippe Mathieu-Daudé | 9406e0d | 2020-09-22 10:38:19 +0200 | [diff] [blame] | 814 | regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 815 | /* Wait for CSTS.RDY = 0. */ |
Philippe Mathieu-Daudé | e4f310f | 2020-08-21 21:53:45 +0200 | [diff] [blame] | 816 | deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS; |
Philippe Mathieu-Daudé | fad1eb6 | 2020-09-22 10:38:20 +0200 | [diff] [blame] | 817 | while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 818 | if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) { |
| 819 | error_setg(errp, "Timeout while waiting for device to reset (%" |
| 820 | PRId64 " ms)", |
| 821 | timeout_ms); |
| 822 | ret = -ETIMEDOUT; |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 823 | goto out; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 824 | } |
| 825 | } |
| 826 | |
Philippe Mathieu-Daudé | 4b19e9b | 2020-10-29 10:33:04 +0100 | [diff] [blame] | 827 | s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0, |
| 828 | sizeof(NvmeBar) + NVME_DOORBELL_SIZE, |
| 829 | PROT_WRITE, errp); |
| 830 | s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar)); |
Philippe Mathieu-Daudé | f684532 | 2020-09-22 10:38:17 +0200 | [diff] [blame] | 831 | if (!s->doorbells) { |
| 832 | ret = -EINVAL; |
| 833 | goto out; |
| 834 | } |
| 835 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 836 | /* Set up admin queue. */ |
| 837 | s->queues = g_new(NVMeQueuePair *, 1); |
Philippe Mathieu-Daudé | 52b75ea | 2020-10-29 10:32:56 +0100 | [diff] [blame] | 838 | q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp); |
| 839 | if (!q) { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 840 | ret = -EINVAL; |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 841 | goto out; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 842 | } |
Philippe Mathieu-Daudé | 52b75ea | 2020-10-29 10:32:56 +0100 | [diff] [blame] | 843 | s->queues[INDEX_ADMIN] = q; |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 844 | s->queue_count = 1; |
Philippe Mathieu-Daudé | 3c363c0 | 2020-10-29 10:32:55 +0100 | [diff] [blame] | 845 | QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000); |
| 846 | regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) | |
| 847 | ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT)); |
Philippe Mathieu-Daudé | 52b75ea | 2020-10-29 10:32:56 +0100 | [diff] [blame] | 848 | regs->asq = cpu_to_le64(q->sq.iova); |
| 849 | regs->acq = cpu_to_le64(q->cq.iova); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 850 | |
| 851 | /* After setting up all control registers we can enable device now. */ |
Philippe Mathieu-Daudé | fad1eb6 | 2020-09-22 10:38:20 +0200 | [diff] [blame] | 852 | regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) | |
| 853 | (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) | |
| 854 | CC_EN_MASK); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 855 | /* Wait for CSTS.RDY = 1. */ |
| 856 | now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
Philippe Mathieu-Daudé | eefffb0 | 2020-09-22 10:38:21 +0200 | [diff] [blame] | 857 | deadline = now + timeout_ms * SCALE_MS; |
Philippe Mathieu-Daudé | fad1eb6 | 2020-09-22 10:38:20 +0200 | [diff] [blame] | 858 | while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 859 | if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) { |
| 860 | error_setg(errp, "Timeout while waiting for device to start (%" |
| 861 | PRId64 " ms)", |
| 862 | timeout_ms); |
| 863 | ret = -ETIMEDOUT; |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 864 | goto out; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 865 | } |
| 866 | } |
| 867 | |
Philippe Mathieu-Daudé | b111b3f | 2020-08-21 21:53:59 +0200 | [diff] [blame] | 868 | ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier, |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 869 | VFIO_PCI_MSIX_IRQ_INDEX, errp); |
| 870 | if (ret) { |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 871 | goto out; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 872 | } |
Philippe Mathieu-Daudé | b111b3f | 2020-08-21 21:53:59 +0200 | [diff] [blame] | 873 | aio_set_event_notifier(bdrv_get_aio_context(bs), |
| 874 | &s->irq_notifier[MSIX_SHARED_IRQ_IDX], |
Stefan Hajnoczi | 60f782b | 2023-05-16 15:02:38 -0400 | [diff] [blame] | 875 | nvme_handle_event, nvme_poll_cb, |
Stefan Hajnoczi | 826cc32 | 2021-12-07 13:23:31 +0000 | [diff] [blame] | 876 | nvme_poll_ready); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 877 | |
Philippe Mathieu-Daudé | 7a5f00d | 2020-10-29 10:32:51 +0100 | [diff] [blame] | 878 | if (!nvme_identify(bs, namespace, errp)) { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 879 | ret = -EIO; |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 880 | goto out; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 881 | } |
| 882 | |
| 883 | /* Set up command queues. */ |
| 884 | if (!nvme_add_io_queue(bs, errp)) { |
| 885 | ret = -EIO; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 886 | } |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 887 | out: |
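| | /* The register window is only needed during initialization; unmap it. */ |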
Philippe Mathieu-Daudé | 37d7a45 | 2020-09-22 10:38:18 +0200 | [diff] [blame] | 888 | if (regs) { |
| 889 | qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar)); |
| 890 | } |
| 891 | |
Paolo Bonzini | d656aaa | 2023-09-04 12:07:19 +0200 | [diff] [blame] | 892 | /* Cleaning up is done in nvme_open() upon error. */ |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 893 | return ret; |
| 894 | } |
| 895 | |
| 896 | /* Parse a filename in the format nvme://XXXX:XX:XX.X/X. Example: |
| 897 | * |
| 898 | * nvme://0000:44:00.0/1 |
| 899 | * |
| 900 | * where "nvme://" is the fixed protocol prefix, the middle part is the PCI |
| 901 | * address, and the last part is the namespace number, starting from 1 as |
| 902 | * defined by the NVMe spec. */ |
| 903 | static void nvme_parse_filename(const char *filename, QDict *options, |
| 904 | Error **errp) |
| 905 | { |
| 906 | int pref = strlen("nvme://"); |
| 907 | |
| 908 | if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) { |
| 909 | const char *tmp = filename + pref; |
| 910 | char *device; |
| 911 | const char *namespace; |
| 912 | unsigned long ns; |
| 913 | const char *slash = strchr(tmp, '/'); |
| 914 | if (!slash) { |
Laurent Vivier | 625eaca | 2018-03-23 15:32:01 +0100 | [diff] [blame] | 915 | qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 916 | return; |
| 917 | } |
| 918 | device = g_strndup(tmp, slash - tmp); |
Laurent Vivier | 625eaca | 2018-03-23 15:32:01 +0100 | [diff] [blame] | 919 | qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 920 | g_free(device); |
| 921 | namespace = slash + 1; |
| 922 | if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) { |
| 923 | error_setg(errp, "Invalid namespace '%s', positive number expected", |
| 924 | namespace); |
| 925 | return; |
| 926 | } |
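| | /* An omitted namespace component defaults to namespace 1. */ |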
Laurent Vivier | 625eaca | 2018-03-23 15:32:01 +0100 | [diff] [blame] | 927 | qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE, |
| 928 | *namespace ? namespace : "1"); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 929 | } |
| 930 | } |
| 931 | |
| 932 | static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable, |
| 933 | Error **errp) |
| 934 | { |
| 935 | int ret; |
| 936 | BDRVNVMeState *s = bs->opaque; |
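| | /* |
| | * Set Features with feature ID 0x06 (Volatile Write Cache); cdw11 bit 0 |
| | * enables or disables the cache. |
| | */ |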
| 937 | NvmeCmd cmd = { |
| 938 | .opcode = NVME_ADM_CMD_SET_FEATURES, |
| 939 | .nsid = cpu_to_le32(s->nsid), |
| 940 | .cdw10 = cpu_to_le32(0x06), |
| 941 | .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00), |
| 942 | }; |
| 943 | |
Philippe Mathieu-Daudé | 08d5406 | 2020-10-29 10:32:57 +0100 | [diff] [blame] | 944 | ret = nvme_admin_cmd_sync(bs, &cmd); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 945 | if (ret) { |
| 946 | error_setg(errp, "Failed to configure NVMe write cache"); |
| 947 | } |
| 948 | return ret; |
| 949 | } |
| 950 | |
| 951 | static void nvme_close(BlockDriverState *bs) |
| 952 | { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 953 | BDRVNVMeState *s = bs->opaque; |
| 954 | |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 955 | for (unsigned i = 0; i < s->queue_count; ++i) { |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 956 | nvme_free_queue_pair(s->queues[i]); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 957 | } |
Fam Zheng | 9582f35 | 2018-07-12 10:54:20 +0800 | [diff] [blame] | 958 | g_free(s->queues); |
Philippe Mathieu-Daudé | b111b3f | 2020-08-21 21:53:59 +0200 | [diff] [blame] | 959 | aio_set_event_notifier(bdrv_get_aio_context(bs), |
| 960 | &s->irq_notifier[MSIX_SHARED_IRQ_IDX], |
Stefan Hajnoczi | 60f782b | 2023-05-16 15:02:38 -0400 | [diff] [blame] | 961 | NULL, NULL, NULL); |
Philippe Mathieu-Daudé | b111b3f | 2020-08-21 21:53:59 +0200 | [diff] [blame] | 962 | event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]); |
Philippe Mathieu-Daudé | 4b19e9b | 2020-10-29 10:33:04 +0100 | [diff] [blame] | 963 | qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map, |
| 964 | 0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 965 | qemu_vfio_close(s->vfio); |
Max Reitz | cc61b07 | 2019-02-01 20:29:30 +0100 | [diff] [blame] | 966 | |
| 967 | g_free(s->device); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 968 | } |
| 969 | |
Paolo Bonzini | d656aaa | 2023-09-04 12:07:19 +0200 | [diff] [blame] | 970 | static int nvme_open(BlockDriverState *bs, QDict *options, int flags, |
| 971 | Error **errp) |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 972 | { |
| 973 | const char *device; |
| 974 | QemuOpts *opts; |
| 975 | int namespace; |
| 976 | int ret; |
| 977 | BDRVNVMeState *s = bs->opaque; |
| 978 | |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 979 | bs->supported_write_flags = BDRV_REQ_FUA; |
| 980 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 981 | opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); |
| 982 | qemu_opts_absorb_qdict(opts, options, &error_abort); |
| 983 | device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE); |
| 984 | if (!device) { |
| 985 | error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required"); |
| 986 | qemu_opts_del(opts); |
| 987 | return -EINVAL; |
| 988 | } |
| 989 | |
| 990 | namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1); |
| 991 | ret = nvme_init(bs, device, namespace, errp); |
| 992 | qemu_opts_del(opts); |
| 993 | if (ret) { |
| 994 | goto fail; |
| 995 | } |
| 996 | if (flags & BDRV_O_NOCACHE) { |
| 997 | if (!s->write_cache_supported) { |
| 998 | error_setg(errp, |
| 999 | "NVMe controller doesn't support write cache configuration"); |
| 1000 | ret = -EINVAL; |
| 1001 | } else { |
| 1002 | ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE), |
| 1003 | errp); |
| 1004 | } |
| 1005 | if (ret) { |
| 1006 | goto fail; |
| 1007 | } |
| 1008 | } |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1009 | return 0; |
| 1010 | fail: |
| 1011 | nvme_close(bs); |
| 1012 | return ret; |
| 1013 | } |
| 1014 | |
Emanuele Giuseppe Esposito | c86422c | 2023-01-13 21:42:04 +0100 | [diff] [blame] | 1015 | static int64_t coroutine_fn nvme_co_getlength(BlockDriverState *bs) |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1016 | { |
| 1017 | BDRVNVMeState *s = bs->opaque; |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1018 | return s->nsze << s->blkshift; |
| 1019 | } |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1020 | |
Max Reitz | 1120407 | 2019-07-30 13:48:12 +0200 | [diff] [blame] | 1021 | static uint32_t nvme_get_blocksize(BlockDriverState *bs) |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1022 | { |
| 1023 | BDRVNVMeState *s = bs->opaque; |
Max Reitz | 1120407 | 2019-07-30 13:48:12 +0200 | [diff] [blame] | 1024 | assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12); |
| 1025 | return UINT32_C(1) << s->blkshift; |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1026 | } |
| 1027 | |
| 1028 | static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) |
| 1029 | { |
Max Reitz | 1120407 | 2019-07-30 13:48:12 +0200 | [diff] [blame] | 1030 | uint32_t blocksize = nvme_get_blocksize(bs); |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1031 | bsz->phys = blocksize; |
| 1032 | bsz->log = blocksize; |
| 1033 | return 0; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1034 | } |
| 1035 | |
| 1036 | /* Called with s->dma_map_lock held */ |
| 1037 | static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs, |
| 1038 | QEMUIOVector *qiov) |
| 1039 | { |
| 1040 | int r = 0; |
| 1041 | BDRVNVMeState *s = bs->opaque; |
| 1042 | |
| 1043 | s->dma_map_count -= qiov->size; |
| 1044 | if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) { |
| 1045 | r = qemu_vfio_dma_reset_temporary(s->vfio); |
| 1046 | if (!r) { |
| 1047 | qemu_co_queue_restart_all(&s->dma_flush_queue); |
| 1048 | } |
| 1049 | } |
| 1050 | return r; |
| 1051 | } |
| 1052 | |
| 1053 | /* Called with s->dma_map_lock held */ |
| 1054 | static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd, |
| 1055 | NVMeRequest *req, QEMUIOVector *qiov) |
| 1056 | { |
| 1057 | BDRVNVMeState *s = bs->opaque; |
| 1058 | uint64_t *pagelist = req->prp_list_page; |
| 1059 | int i, j, r; |
| 1060 | int entries = 0; |
Philippe Mathieu-Daudé | 9bd2788 | 2021-09-02 09:00:25 +0200 | [diff] [blame] | 1061 | Error *local_err = NULL, **errp = NULL; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1062 | |
| 1063 | assert(qiov->size); |
| 1064 | assert(QEMU_IS_ALIGNED(qiov->size, s->page_size)); |
| 1065 | assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t)); |
| 1066 | for (i = 0; i < qiov->niov; ++i) { |
| 1067 | bool retry = true; |
| 1068 | uint64_t iova; |
Eric Auger | 9e13d59 | 2020-10-29 10:33:03 +0100 | [diff] [blame] | 1069 | size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len, |
Marc-André Lureau | 8e3b0cb | 2022-03-23 19:57:22 +0400 | [diff] [blame] | 1070 | qemu_real_host_page_size()); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1071 | try_map: |
| 1072 | r = qemu_vfio_dma_map(s->vfio, |
| 1073 | qiov->iov[i].iov_base, |
Philippe Mathieu-Daudé | 9bd2788 | 2021-09-02 09:00:25 +0200 | [diff] [blame] | 1074 | len, true, &iova, errp); |
Philippe Mathieu-Daudé | 15a730e | 2021-07-23 21:58:43 +0200 | [diff] [blame] | 1075 | if (r == -ENOSPC) { |
| 1076 | /* |
| 1077 | * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA |
| 1078 | * ioctl returns -ENOSPC to signal the user exhausted the DMA |
| 1079 | * mappings available for a container since Linux kernel commit |
| 1080 | * 492855939bdb ("vfio/type1: Limit DMA mappings per container", |
| 1081 | * April 2019, see CVE-2019-3882). |
| 1082 | * |
| 1083 | * This block driver already handles this error path by checking |
| 1084 | * for the -ENOMEM error, so we directly replace -ENOSPC by |
| 1085 | * -ENOMEM. Besides, -ENOSPC has a specific meaning for blockdev |
| 1086 | * coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and |
| 1087 | * BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator |
| 1088 | * to add more storage to the blockdev. Not something we can do |
| 1089 | * easily with an IOMMU :) |
| 1090 | */ |
| 1091 | r = -ENOMEM; |
| 1092 | } |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1093 | if (r == -ENOMEM && retry) { |
Philippe Mathieu-Daudé | 15a730e | 2021-07-23 21:58:43 +0200 | [diff] [blame] | 1094 | /* |
| 1095 | * We exhausted the DMA mappings available for our container: |
| 1096 | * recycle the volatile IOVA mappings. |
| 1097 | */ |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1098 | retry = false; |
| 1099 | trace_nvme_dma_flush_queue_wait(s); |
| 1100 | if (s->dma_map_count) { |
| 1101 | trace_nvme_dma_map_flush(s); |
| 1102 | qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock); |
| 1103 | } else { |
| 1104 | r = qemu_vfio_dma_reset_temporary(s->vfio); |
| 1105 | if (r) { |
| 1106 | goto fail; |
| 1107 | } |
| 1108 | } |
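| | /* Collect a detailed error only on the retry so it can be reported below. */ |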
Philippe Mathieu-Daudé | 9bd2788 | 2021-09-02 09:00:25 +0200 | [diff] [blame] | 1109 | errp = &local_err; |
| 1110 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1111 | goto try_map; |
| 1112 | } |
| 1113 | if (r) { |
| 1114 | goto fail; |
| 1115 | } |
| 1116 | |
| 1117 | for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) { |
Li Feng | 2916405 | 2018-11-01 18:38:07 +0800 | [diff] [blame] | 1118 | pagelist[entries++] = cpu_to_le64(iova + j * s->page_size); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1119 | } |
| 1120 | trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base, |
| 1121 | qiov->iov[i].iov_len / s->page_size); |
| 1122 | } |
| 1123 | |
| 1124 | s->dma_map_count += qiov->size; |
| 1125 | |
| 1126 | assert(entries <= s->page_size / sizeof(uint64_t)); |
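| | /* |
| | * PRP1 always holds the first page; with exactly two pages PRP2 holds the |
| | * second page directly, otherwise PRP2 points at the PRP list (skipping |
| | * its first entry, which is already in PRP1). |
| | */ |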
| 1127 | switch (entries) { |
| 1128 | case 0: |
| 1129 | abort(); |
| 1130 | case 1: |
Klaus Jensen | c26f217 | 2020-07-06 08:12:46 +0200 | [diff] [blame] | 1131 | cmd->dptr.prp1 = pagelist[0]; |
| 1132 | cmd->dptr.prp2 = 0; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1133 | break; |
| 1134 | case 2: |
Klaus Jensen | c26f217 | 2020-07-06 08:12:46 +0200 | [diff] [blame] | 1135 | cmd->dptr.prp1 = pagelist[0]; |
| 1136 | cmd->dptr.prp2 = pagelist[1]; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1137 | break; |
| 1138 | default: |
Klaus Jensen | c26f217 | 2020-07-06 08:12:46 +0200 | [diff] [blame] | 1139 | cmd->dptr.prp1 = pagelist[0]; |
| 1140 | cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t)); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1141 | break; |
| 1142 | } |
| 1143 | trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries); |
| 1144 | for (i = 0; i < entries; ++i) { |
| 1145 | trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]); |
| 1146 | } |
| 1147 | return 0; |
| 1148 | fail: |
| 1149 | /* No need to unmap the [0, i) iovs even if we've failed, since we don't |
| 1150 | * increment s->dma_map_count. This is okay for fixed mapping memory areas |
| 1151 | * because they are already mapped before calling this function; for |
| 1152 | * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by |
| 1153 | * calling qemu_vfio_dma_reset_temporary when necessary. */ |
Philippe Mathieu-Daudé | 9bd2788 | 2021-09-02 09:00:25 +0200 | [diff] [blame] | 1154 | if (local_err) { |
| 1155 | error_reportf_err(local_err, "Cannot map buffer for DMA: "); |
| 1156 | } |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1157 | return r; |
| 1158 | } |
| 1159 | |
| 1160 | typedef struct { |
| 1161 | Coroutine *co; |
| 1162 | int ret; |
| 1163 | AioContext *ctx; |
| 1164 | } NVMeCoData; |
| 1165 | |
| 1166 | static void nvme_rw_cb_bh(void *opaque) |
| 1167 | { |
| 1168 | NVMeCoData *data = opaque; |
| 1169 | qemu_coroutine_enter(data->co); |
| 1170 | } |
| 1171 | |
| 1172 | static void nvme_rw_cb(void *opaque, int ret) |
| 1173 | { |
| 1174 | NVMeCoData *data = opaque; |
| 1175 | data->ret = ret; |
| 1176 | if (!data->co) { |
| 1177 | /* The rw coroutine hasn't yielded, don't try to enter. */ |
| 1178 | return; |
| 1179 | } |
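| | /* Wake the waiting coroutine via a one-shot bottom half in its AioContext. */ |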
Pavel Dovgalyuk | e4ec5ad | 2019-09-17 14:58:19 +0300 | [diff] [blame] | 1180 | replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1181 | } |
| 1182 | |
| 1183 | static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs, |
| 1184 | uint64_t offset, uint64_t bytes, |
| 1185 | QEMUIOVector *qiov, |
| 1186 | bool is_write, |
| 1187 | int flags) |
| 1188 | { |
| 1189 | int r; |
| 1190 | BDRVNVMeState *s = bs->opaque; |
Philippe Mathieu-Daudé | 73159e5 | 2020-08-21 21:53:48 +0200 | [diff] [blame] | 1191 | NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1192 | NVMeRequest *req; |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1193 | |
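| | /* cdw12 bits 15:0: zero-based block count; bit 30: Force Unit Access. */ |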
| 1194 | uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1195 | (flags & BDRV_REQ_FUA ? 1 << 30 : 0); |
| 1196 | NvmeCmd cmd = { |
| 1197 | .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ, |
| 1198 | .nsid = cpu_to_le32(s->nsid), |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1199 | .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF), |
| 1200 | .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF), |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1201 | .cdw12 = cpu_to_le32(cdw12), |
| 1202 | }; |
| 1203 | NVMeCoData data = { |
| 1204 | .ctx = bdrv_get_aio_context(bs), |
| 1205 | .ret = -EINPROGRESS, |
| 1206 | }; |
| 1207 | |
| 1208 | trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov); |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 1209 | assert(s->queue_count > 1); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1210 | req = nvme_get_free_req(ioq); |
| 1211 | assert(req); |
| 1212 | |
| 1213 | qemu_co_mutex_lock(&s->dma_map_lock); |
| 1214 | r = nvme_cmd_map_qiov(bs, &cmd, req, qiov); |
| 1215 | qemu_co_mutex_unlock(&s->dma_map_lock); |
| 1216 | if (r) { |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 1217 | nvme_put_free_req_and_wake(ioq, req); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1218 | return r; |
| 1219 | } |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 1220 | nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1221 | |
| 1222 | data.co = qemu_coroutine_self(); |
| 1223 | while (data.ret == -EINPROGRESS) { |
| 1224 | qemu_coroutine_yield(); |
| 1225 | } |
| 1226 | |
| 1227 | qemu_co_mutex_lock(&s->dma_map_lock); |
| 1228 | r = nvme_cmd_unmap_qiov(bs, qiov); |
| 1229 | qemu_co_mutex_unlock(&s->dma_map_lock); |
| 1230 | if (r) { |
| 1231 | return r; |
| 1232 | } |
| 1233 | |
| 1234 | trace_nvme_rw_done(s, is_write, offset, bytes, data.ret); |
| 1235 | return data.ret; |
| 1236 | } |
| 1237 | |
| 1238 | static inline bool nvme_qiov_aligned(BlockDriverState *bs, |
| 1239 | const QEMUIOVector *qiov) |
| 1240 | { |
| 1241 | int i; |
| 1242 | BDRVNVMeState *s = bs->opaque; |
| 1243 | |
| 1244 | for (i = 0; i < qiov->niov; ++i) { |
Eric Auger | 9e13d59 | 2020-10-29 10:33:03 +0100 | [diff] [blame] | 1245 | if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, |
Marc-André Lureau | 8e3b0cb | 2022-03-23 19:57:22 +0400 | [diff] [blame] | 1246 | qemu_real_host_page_size()) || |
| 1247 | !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) { |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1248 | trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base, |
| 1249 | qiov->iov[i].iov_len, s->page_size); |
| 1250 | return false; |
| 1251 | } |
| 1252 | } |
| 1253 | return true; |
| 1254 | } |
| 1255 | |
Paolo Bonzini | 711b12e | 2022-09-22 10:49:10 +0200 | [diff] [blame] | 1256 | static coroutine_fn int nvme_co_prw(BlockDriverState *bs, |
| 1257 | uint64_t offset, uint64_t bytes, |
| 1258 | QEMUIOVector *qiov, bool is_write, |
| 1259 | int flags) |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1260 | { |
| 1261 | BDRVNVMeState *s = bs->opaque; |
| 1262 | int r; |
Philippe Mathieu-Daudé | 4a613bd | 2021-10-06 18:49:27 +0200 | [diff] [blame] | 1263 | QEMU_AUTO_VFREE uint8_t *buf = NULL; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1264 | QEMUIOVector local_qiov; |
Marc-André Lureau | 8e3b0cb | 2022-03-23 19:57:22 +0400 | [diff] [blame] | 1265 | size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size()); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1266 | assert(QEMU_IS_ALIGNED(offset, s->page_size)); |
| 1267 | assert(QEMU_IS_ALIGNED(bytes, s->page_size)); |
| 1268 | assert(bytes <= s->max_transfer); |
| 1269 | if (nvme_qiov_aligned(bs, qiov)) { |
Philippe Mathieu-Daudé | f25e7ab | 2020-10-01 18:29:39 +0200 | [diff] [blame] | 1270 | s->stats.aligned_accesses++; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1271 | return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags); |
| 1272 | } |
Philippe Mathieu-Daudé | f25e7ab | 2020-10-01 18:29:39 +0200 | [diff] [blame] | 1273 | s->stats.unaligned_accesses++; |
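| | /* Unaligned request: bounce it through a host-page-aligned buffer. */ |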
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1274 | trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write); |
Marc-André Lureau | 8e3b0cb | 2022-03-23 19:57:22 +0400 | [diff] [blame] | 1275 | buf = qemu_try_memalign(qemu_real_host_page_size(), len); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1276 | |
| 1277 | if (!buf) { |
| 1278 | return -ENOMEM; |
| 1279 | } |
| 1280 | qemu_iovec_init(&local_qiov, 1); |
| 1281 | if (is_write) { |
| 1282 | qemu_iovec_to_buf(qiov, 0, buf, bytes); |
| 1283 | } |
| 1284 | qemu_iovec_add(&local_qiov, buf, bytes); |
| 1285 | r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags); |
| 1286 | qemu_iovec_destroy(&local_qiov); |
| 1287 | if (!r && !is_write) { |
| 1288 | qemu_iovec_from_buf(qiov, 0, buf, bytes); |
| 1289 | } |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1290 | return r; |
| 1291 | } |
| 1292 | |
| 1293 | static coroutine_fn int nvme_co_preadv(BlockDriverState *bs, |
Vladimir Sementsov-Ogievskiy | f7ef38d | 2021-09-03 13:27:59 +0300 | [diff] [blame] | 1294 | int64_t offset, int64_t bytes, |
| 1295 | QEMUIOVector *qiov, |
| 1296 | BdrvRequestFlags flags) |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1297 | { |
| 1298 | return nvme_co_prw(bs, offset, bytes, qiov, false, flags); |
| 1299 | } |
| 1300 | |
| 1301 | static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs, |
Vladimir Sementsov-Ogievskiy | e75abed | 2021-09-03 13:28:00 +0300 | [diff] [blame] | 1302 | int64_t offset, int64_t bytes, |
| 1303 | QEMUIOVector *qiov, |
| 1304 | BdrvRequestFlags flags) |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1305 | { |
| 1306 | return nvme_co_prw(bs, offset, bytes, qiov, true, flags); |
| 1307 | } |
| 1308 | |
| 1309 | static coroutine_fn int nvme_co_flush(BlockDriverState *bs) |
| 1310 | { |
| 1311 | BDRVNVMeState *s = bs->opaque; |
Philippe Mathieu-Daudé | 73159e5 | 2020-08-21 21:53:48 +0200 | [diff] [blame] | 1312 | NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1313 | NVMeRequest *req; |
| 1314 | NvmeCmd cmd = { |
| 1315 | .opcode = NVME_CMD_FLUSH, |
| 1316 | .nsid = cpu_to_le32(s->nsid), |
| 1317 | }; |
| 1318 | NVMeCoData data = { |
| 1319 | .ctx = bdrv_get_aio_context(bs), |
| 1320 | .ret = -EINPROGRESS, |
| 1321 | }; |
| 1322 | |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 1323 | assert(s->queue_count > 1); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1324 | req = nvme_get_free_req(ioq); |
| 1325 | assert(req); |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 1326 | nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1327 | |
| 1328 | data.co = qemu_coroutine_self(); |
| 1329 | if (data.ret == -EINPROGRESS) { |
| 1330 | qemu_coroutine_yield(); |
| 1331 | } |
| 1332 | |
| 1333 | return data.ret; |
| 1334 | } |
| 1335 | |
| 1336 | |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1337 | static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs, |
| 1338 | int64_t offset, |
Vladimir Sementsov-Ogievskiy | f34b2bc | 2021-09-03 13:28:03 +0300 | [diff] [blame] | 1339 | int64_t bytes, |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1340 | BdrvRequestFlags flags) |
| 1341 | { |
| 1342 | BDRVNVMeState *s = bs->opaque; |
Philippe Mathieu-Daudé | 73159e5 | 2020-08-21 21:53:48 +0200 | [diff] [blame] | 1343 | NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1344 | NVMeRequest *req; |
Vladimir Sementsov-Ogievskiy | f34b2bc | 2021-09-03 13:28:03 +0300 | [diff] [blame] | 1345 | uint32_t cdw12; |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1346 | |
| 1347 | if (!s->supports_write_zeroes) { |
| 1348 | return -ENOTSUP; |
| 1349 | } |
| 1350 | |
Vladimir Sementsov-Ogievskiy | f34b2bc | 2021-09-03 13:28:03 +0300 | [diff] [blame] | 1351 | if (bytes == 0) { |
| 1352 | return 0; |
| 1353 | } |
| 1354 | |
| 1355 | cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF; |
| 1356 | /* |
| 1357 | * We should not lose information. pwrite_zeroes_alignment and |
| 1358 | * max_pwrite_zeroes guarantee it. |
| 1359 | */ |
| 1360 | assert(((cdw12 + 1) << s->blkshift) == bytes); |
| 1361 | |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1362 | NvmeCmd cmd = { |
Klaus Jensen | 6926515 | 2020-03-30 23:10:13 +0200 | [diff] [blame] | 1363 | .opcode = NVME_CMD_WRITE_ZEROES, |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1364 | .nsid = cpu_to_le32(s->nsid), |
| 1365 | .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF), |
| 1366 | .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF), |
| 1367 | }; |
| 1368 | |
| 1369 | NVMeCoData data = { |
| 1370 | .ctx = bdrv_get_aio_context(bs), |
| 1371 | .ret = -EINPROGRESS, |
| 1372 | }; |
| 1373 | |
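| | /* Write Zeroes cdw12: bit 25 is Deallocate, bit 30 is Force Unit Access. */ |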
| 1374 | if (flags & BDRV_REQ_MAY_UNMAP) { |
| 1375 | cdw12 |= (1 << 25); |
| 1376 | } |
| 1377 | |
| 1378 | if (flags & BDRV_REQ_FUA) { |
| 1379 | cdw12 |= (1 << 30); |
| 1380 | } |
| 1381 | |
| 1382 | cmd.cdw12 = cpu_to_le32(cdw12); |
| 1383 | |
| 1384 | trace_nvme_write_zeroes(s, offset, bytes, flags); |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 1385 | assert(s->queue_count > 1); |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1386 | req = nvme_get_free_req(ioq); |
| 1387 | assert(req); |
| 1388 | |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 1389 | nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data); |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1390 | |
| 1391 | data.co = qemu_coroutine_self(); |
| 1392 | while (data.ret == -EINPROGRESS) { |
| 1393 | qemu_coroutine_yield(); |
| 1394 | } |
| 1395 | |
| 1396 | trace_nvme_rw_done(s, true, offset, bytes, data.ret); |
| 1397 | return data.ret; |
| 1398 | } |
| 1399 | |
| 1400 | |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1401 | static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, |
| 1402 | int64_t offset, |
Vladimir Sementsov-Ogievskiy | 0c80228 | 2021-09-03 13:28:06 +0300 | [diff] [blame] | 1403 | int64_t bytes) |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1404 | { |
| 1405 | BDRVNVMeState *s = bs->opaque; |
Philippe Mathieu-Daudé | 73159e5 | 2020-08-21 21:53:48 +0200 | [diff] [blame] | 1406 | NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1407 | NVMeRequest *req; |
Philippe Mathieu-Daudé | 4a613bd | 2021-10-06 18:49:27 +0200 | [diff] [blame] | 1408 | QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL; |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1409 | QEMUIOVector local_qiov; |
| 1410 | int ret; |
| 1411 | |
| 1412 | NvmeCmd cmd = { |
| 1413 | .opcode = NVME_CMD_DSM, |
| 1414 | .nsid = cpu_to_le32(s->nsid), |
| 1415 | .cdw10 = cpu_to_le32(0), /* number of ranges, 0-based */ |
| 1416 | .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */ |
| 1417 | }; |
| 1418 | |
| 1419 | NVMeCoData data = { |
| 1420 | .ctx = bdrv_get_aio_context(bs), |
| 1421 | .ret = -EINPROGRESS, |
| 1422 | }; |
| 1423 | |
| 1424 | if (!s->supports_discard) { |
| 1425 | return -ENOTSUP; |
| 1426 | } |
| 1427 | |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 1428 | assert(s->queue_count > 1); |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1429 | |
Vladimir Sementsov-Ogievskiy | 0c80228 | 2021-09-03 13:28:06 +0300 | [diff] [blame] | 1430 | /* |
| 1431 | * Filling the @buf requires @offset and @bytes to satisfy restrictions |
| 1432 | * defined in nvme_refresh_limits(). |
| 1433 | */ |
| 1434 | assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift)); |
| 1435 | assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift)); |
| 1436 | assert((bytes >> s->blkshift) <= UINT32_MAX); |
| 1437 | |
Philippe Mathieu-Daudé | 38e1f81 | 2020-08-21 21:53:54 +0200 | [diff] [blame] | 1438 | buf = qemu_try_memalign(s->page_size, s->page_size); |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1439 | if (!buf) { |
| 1440 | return -ENOMEM; |
| 1441 | } |
Philippe Mathieu-Daudé | 2ed8469 | 2020-08-21 21:53:53 +0200 | [diff] [blame] | 1442 | memset(buf, 0, s->page_size); |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1443 | buf->nlb = cpu_to_le32(bytes >> s->blkshift); |
| 1444 | buf->slba = cpu_to_le64(offset >> s->blkshift); |
| 1445 | buf->cattr = 0; |
| 1446 | |
| 1447 | qemu_iovec_init(&local_qiov, 1); |
| 1448 | qemu_iovec_add(&local_qiov, buf, 4096); |
| 1449 | |
| 1450 | req = nvme_get_free_req(ioq); |
| 1451 | assert(req); |
| 1452 | |
| 1453 | qemu_co_mutex_lock(&s->dma_map_lock); |
| 1454 | ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov); |
| 1455 | qemu_co_mutex_unlock(&s->dma_map_lock); |
| 1456 | |
| 1457 | if (ret) { |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 1458 | nvme_put_free_req_and_wake(ioq, req); |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1459 | goto out; |
| 1460 | } |
| 1461 | |
| 1462 | trace_nvme_dsm(s, offset, bytes); |
| 1463 | |
Stefan Hajnoczi | b75fd5f | 2020-06-17 14:22:00 +0100 | [diff] [blame] | 1464 | nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data); |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1465 | |
| 1466 | data.co = qemu_coroutine_self(); |
| 1467 | while (data.ret == -EINPROGRESS) { |
| 1468 | qemu_coroutine_yield(); |
| 1469 | } |
| 1470 | |
| 1471 | qemu_co_mutex_lock(&s->dma_map_lock); |
| 1472 | ret = nvme_cmd_unmap_qiov(bs, &local_qiov); |
| 1473 | qemu_co_mutex_unlock(&s->dma_map_lock); |
| 1474 | |
| 1475 | if (ret) { |
| 1476 | goto out; |
| 1477 | } |
| 1478 | |
| 1479 | ret = data.ret; |
| 1480 | trace_nvme_dsm_done(s, offset, bytes, ret); |
| 1481 | out: |
| 1482 | qemu_iovec_destroy(&local_qiov); |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1483 | return ret; |
| 1485 | } |
| 1486 | |
Philippe Mathieu-Daudé | c8807c5 | 2020-12-10 13:52:02 +0100 | [diff] [blame] | 1487 | static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset, |
| 1488 | bool exact, PreallocMode prealloc, |
| 1489 | BdrvRequestFlags flags, Error **errp) |
| 1490 | { |
| 1491 | int64_t cur_length; |
| 1492 | |
| 1493 | if (prealloc != PREALLOC_MODE_OFF) { |
| 1494 | error_setg(errp, "Unsupported preallocation mode '%s'", |
| 1495 | PreallocMode_str(prealloc)); |
| 1496 | return -ENOTSUP; |
| 1497 | } |
| 1498 | |
Emanuele Giuseppe Esposito | c86422c | 2023-01-13 21:42:04 +0100 | [diff] [blame] | 1499 | cur_length = nvme_co_getlength(bs); |
Philippe Mathieu-Daudé | c8807c5 | 2020-12-10 13:52:02 +0100 | [diff] [blame] | 1500 | if (offset != cur_length && exact) { |
| 1501 | error_setg(errp, "Cannot resize NVMe devices"); |
| 1502 | return -ENOTSUP; |
| 1503 | } else if (offset > cur_length) { |
| 1504 | error_setg(errp, "Cannot grow NVMe devices"); |
| 1505 | return -EINVAL; |
| 1506 | } |
| 1507 | |
| 1508 | return 0; |
| 1509 | } |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1510 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1511 | static int nvme_reopen_prepare(BDRVReopenState *reopen_state, |
| 1512 | BlockReopenQueue *queue, Error **errp) |
| 1513 | { |
| 1514 | return 0; |
| 1515 | } |
| 1516 | |
Max Reitz | 998b3a1 | 2019-02-01 20:29:28 +0100 | [diff] [blame] | 1517 | static void nvme_refresh_filename(BlockDriverState *bs) |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1518 | { |
Max Reitz | cc61b07 | 2019-02-01 20:29:30 +0100 | [diff] [blame] | 1519 | BDRVNVMeState *s = bs->opaque; |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1520 | |
Max Reitz | cc61b07 | 2019-02-01 20:29:30 +0100 | [diff] [blame] | 1521 | snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i", |
| 1522 | s->device, s->nsid); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1523 | } |
| 1524 | |
| 1525 | static void nvme_refresh_limits(BlockDriverState *bs, Error **errp) |
| 1526 | { |
| 1527 | BDRVNVMeState *s = bs->opaque; |
| 1528 | |
| 1529 | bs->bl.opt_mem_alignment = s->page_size; |
| 1530 | bs->bl.request_alignment = s->page_size; |
| 1531 | bs->bl.max_transfer = s->max_transfer; |
Vladimir Sementsov-Ogievskiy | f34b2bc | 2021-09-03 13:28:03 +0300 | [diff] [blame] | 1532 | |
| 1533 | /* |
| 1534 | * See nvme_co_pwrite_zeroes(): after the shift and decrement, the block |
| 1535 | * count must fit in 16 bits (at most 0xFFFF). |
| 1536 | */ |
| 1537 | bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16); |
| 1538 | bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment, |
| 1539 | 1UL << s->blkshift); |
Vladimir Sementsov-Ogievskiy | 0c80228 | 2021-09-03 13:28:06 +0300 | [diff] [blame] | 1540 | |
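| | /* A single DSM range describes at most UINT32_MAX logical blocks. */ |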
| 1541 | bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift; |
| 1542 | bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment, |
| 1543 | 1UL << s->blkshift); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1544 | } |
| 1545 | |
| 1546 | static void nvme_detach_aio_context(BlockDriverState *bs) |
| 1547 | { |
| 1548 | BDRVNVMeState *s = bs->opaque; |
| 1549 | |
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 1550 | for (unsigned i = 0; i < s->queue_count; i++) { |
Stefan Hajnoczi | 7838c67 | 2020-06-17 14:22:01 +0100 | [diff] [blame] | 1551 | NVMeQueuePair *q = s->queues[i]; |
| 1552 | |
| 1553 | qemu_bh_delete(q->completion_bh); |
| 1554 | q->completion_bh = NULL; |
| 1555 | } |
| 1556 | |
Philippe Mathieu-Daudé | b111b3f | 2020-08-21 21:53:59 +0200 | [diff] [blame] | 1557 | aio_set_event_notifier(bdrv_get_aio_context(bs), |
| 1558 | &s->irq_notifier[MSIX_SHARED_IRQ_IDX], |
Stefan Hajnoczi | 60f782b | 2023-05-16 15:02:38 -0400 | [diff] [blame] | 1559 | NULL, NULL, NULL); |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1560 | } |
| 1561 | |
| 1562 | static void nvme_attach_aio_context(BlockDriverState *bs, |
| 1563 | AioContext *new_context) |
| 1564 | { |
| 1565 | BDRVNVMeState *s = bs->opaque; |
| 1566 | |
| 1567 | s->aio_context = new_context; |
Philippe Mathieu-Daudé | b111b3f | 2020-08-21 21:53:59 +0200 | [diff] [blame] | 1568 | aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX], |
Stefan Hajnoczi | 60f782b | 2023-05-16 15:02:38 -0400 | [diff] [blame] | 1569 | nvme_handle_event, nvme_poll_cb, |
Stefan Hajnoczi | 826cc32 | 2021-12-07 13:23:31 +0000 | [diff] [blame] | 1570 | nvme_poll_ready); |
Stefan Hajnoczi | 7838c67 | 2020-06-17 14:22:01 +0100 | [diff] [blame] | 1571 | |
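| | /* Recreate the per-queue completion bottom halves in the new context. */ |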
Philippe Mathieu-Daudé | 1b539bd | 2020-10-29 10:32:50 +0100 | [diff] [blame] | 1572 | for (unsigned i = 0; i < s->queue_count; i++) { |
Stefan Hajnoczi | 7838c67 | 2020-06-17 14:22:01 +0100 | [diff] [blame] | 1573 | NVMeQueuePair *q = s->queues[i]; |
| 1574 | |
| 1575 | q->completion_bh = |
| 1576 | aio_bh_new(new_context, nvme_process_completion_bh, q); |
| 1577 | } |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1578 | } |
| 1579 | |
Stefan Hajnoczi | f4ec04b | 2022-10-13 14:59:02 -0400 | [diff] [blame] | 1580 | static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size, |
| 1581 | Error **errp) |
Fam Zheng | 9ed6161 | 2018-01-16 14:08:57 +0800 | [diff] [blame] | 1582 | { |
| 1583 | int ret; |
| 1584 | BDRVNVMeState *s = bs->opaque; |
| 1585 | |
Stefan Hajnoczi | f4ec04b | 2022-10-13 14:59:02 -0400 | [diff] [blame] | 1586 | /* |
| 1587 | * FIXME: we may run out of IOVA addresses after repeated |
| 1588 | * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap |
| 1589 | * doesn't reclaim addresses for fixed mappings. |
| 1590 | */ |
| 1591 | ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp); |
| 1592 | return ret == 0; |
Fam Zheng | 9ed6161 | 2018-01-16 14:08:57 +0800 | [diff] [blame] | 1593 | } |
| 1594 | |
Stefan Hajnoczi | 4f38401 | 2022-10-13 14:58:59 -0400 | [diff] [blame] | 1595 | static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size) |
Fam Zheng | 9ed6161 | 2018-01-16 14:08:57 +0800 | [diff] [blame] | 1596 | { |
| 1597 | BDRVNVMeState *s = bs->opaque; |
| 1598 | |
| 1599 | qemu_vfio_dma_unmap(s->vfio, host); |
| 1600 | } |
| 1601 | |
Philippe Mathieu-Daudé | f25e7ab | 2020-10-01 18:29:39 +0200 | [diff] [blame] | 1602 | static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs) |
| 1603 | { |
| 1604 | BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1); |
| 1605 | BDRVNVMeState *s = bs->opaque; |
| 1606 | |
| 1607 | stats->driver = BLOCKDEV_DRIVER_NVME; |
| 1608 | stats->u.nvme = (BlockStatsSpecificNvme) { |
| 1609 | .completion_errors = s->stats.completion_errors, |
| 1610 | .aligned_accesses = s->stats.aligned_accesses, |
| 1611 | .unaligned_accesses = s->stats.unaligned_accesses, |
| 1612 | }; |
| 1613 | |
| 1614 | return stats; |
| 1615 | } |
| 1616 | |
Max Reitz | 2654267 | 2019-02-01 20:29:25 +0100 | [diff] [blame] | 1617 | static const char *const nvme_strong_runtime_opts[] = { |
| 1618 | NVME_BLOCK_OPT_DEVICE, |
| 1619 | NVME_BLOCK_OPT_NAMESPACE, |
| 1620 | |
| 1621 | NULL |
| 1622 | }; |
| 1623 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1624 | static BlockDriver bdrv_nvme = { |
| 1625 | .format_name = "nvme", |
| 1626 | .protocol_name = "nvme", |
| 1627 | .instance_size = sizeof(BDRVNVMeState), |
| 1628 | |
Maxim Levitsky | 5a5e7f8 | 2020-03-26 03:12:18 +0200 | [diff] [blame] | 1629 | .bdrv_co_create_opts = bdrv_co_create_opts_simple, |
| 1630 | .create_opts = &bdrv_create_opts_simple, |
| 1631 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1632 | .bdrv_parse_filename = nvme_parse_filename, |
Paolo Bonzini | d656aaa | 2023-09-04 12:07:19 +0200 | [diff] [blame] | 1633 | .bdrv_open = nvme_open, |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1634 | .bdrv_close = nvme_close, |
Emanuele Giuseppe Esposito | c86422c | 2023-01-13 21:42:04 +0100 | [diff] [blame] | 1635 | .bdrv_co_getlength = nvme_co_getlength, |
Maxim Levitsky | 118d1b6 | 2019-07-16 19:30:19 +0300 | [diff] [blame] | 1636 | .bdrv_probe_blocksizes = nvme_probe_blocksizes, |
Philippe Mathieu-Daudé | c8807c5 | 2020-12-10 13:52:02 +0100 | [diff] [blame] | 1637 | .bdrv_co_truncate = nvme_co_truncate, |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1638 | |
| 1639 | .bdrv_co_preadv = nvme_co_preadv, |
| 1640 | .bdrv_co_pwritev = nvme_co_pwritev, |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1641 | |
| 1642 | .bdrv_co_pwrite_zeroes = nvme_co_pwrite_zeroes, |
Maxim Levitsky | e87a09d | 2019-09-13 16:36:27 +0300 | [diff] [blame] | 1643 | .bdrv_co_pdiscard = nvme_co_pdiscard, |
Maxim Levitsky | e0dd95e | 2019-09-13 16:36:26 +0300 | [diff] [blame] | 1644 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1645 | .bdrv_co_flush_to_disk = nvme_co_flush, |
| 1646 | .bdrv_reopen_prepare = nvme_reopen_prepare, |
| 1647 | |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1648 | .bdrv_refresh_filename = nvme_refresh_filename, |
| 1649 | .bdrv_refresh_limits = nvme_refresh_limits, |
Max Reitz | 2654267 | 2019-02-01 20:29:25 +0100 | [diff] [blame] | 1650 | .strong_runtime_opts = nvme_strong_runtime_opts, |
Philippe Mathieu-Daudé | f25e7ab | 2020-10-01 18:29:39 +0200 | [diff] [blame] | 1651 | .bdrv_get_specific_stats = nvme_get_specific_stats, |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1652 | |
| 1653 | .bdrv_detach_aio_context = nvme_detach_aio_context, |
| 1654 | .bdrv_attach_aio_context = nvme_attach_aio_context, |
| 1655 | |
Fam Zheng | 9ed6161 | 2018-01-16 14:08:57 +0800 | [diff] [blame] | 1656 | .bdrv_register_buf = nvme_register_buf, |
| 1657 | .bdrv_unregister_buf = nvme_unregister_buf, |
Fam Zheng | bdd6a90 | 2018-01-16 14:08:55 +0800 | [diff] [blame] | 1658 | }; |
| 1659 | |
| 1660 | static void bdrv_nvme_init(void) |
| 1661 | { |
| 1662 | bdrv_register(&bdrv_nvme); |
| 1663 | } |
| 1664 | |
| 1665 | block_init(bdrv_nvme_init); |