Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 1 | /* |
| 2 | * Virtio PMEM device |
| 3 | * |
| 4 | * Copyright (C) 2018-2019 Red Hat, Inc. |
| 5 | * |
| 6 | * Authors: |
| 7 | * Pankaj Gupta <pagupta@redhat.com> |
| 8 | * David Hildenbrand <david@redhat.com> |
| 9 | * |
| 10 | * This work is licensed under the terms of the GNU GPL, version 2. |
| 11 | * See the COPYING file in the top-level directory. |
| 12 | */ |
| 13 | |
| 14 | #include "qemu/osdep.h" |
| 15 | #include "qapi/error.h" |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 16 | #include "qemu/error-report.h" |
Markus Armbruster | e2c1c34 | 2022-12-21 14:35:49 +0100 | [diff] [blame] | 17 | #include "qemu/iov.h" |
Markus Armbruster | db72581 | 2019-08-12 07:23:50 +0200 | [diff] [blame] | 18 | #include "qemu/main-loop.h" |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 19 | #include "hw/virtio/virtio-pmem.h" |
Markus Armbruster | a27bd6c | 2019-08-12 07:23:51 +0200 | [diff] [blame] | 20 | #include "hw/qdev-properties.h" |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 21 | #include "hw/virtio/virtio-access.h" |
| 22 | #include "standard-headers/linux/virtio_ids.h" |
| 23 | #include "standard-headers/linux/virtio_pmem.h" |
Markus Armbruster | 7969dd9 | 2019-08-12 07:23:54 +0200 | [diff] [blame] | 24 | #include "sysemu/hostmem.h" |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 25 | #include "block/aio.h" |
| 26 | #include "block/thread-pool.h" |
Pankaj Gupta | 73b1230 | 2020-11-17 12:57:05 +0100 | [diff] [blame] | 27 | #include "trace.h" |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 28 | |
/*
 * Per-request state for one guest flush request, passed from the request
 * handler through the thread pool to the completion callback.
 *
 * 'elem' must remain the first member: the popped element is also accessed
 * via a cast of the containing structure's address (see the
 * (VirtQueueElement *) cast in virtio_pmem_flush()).
 */
typedef struct VirtIODeviceRequest {
    VirtQueueElement elem;          /* descriptor chain for this request */
    int fd;                         /* fd of the backing memory region */
    VirtIOPMEM *pmem;               /* owning device */
    VirtIODevice *vdev;             /* same device, as VirtIODevice */
    struct virtio_pmem_req req;     /* guest request (currently unused) */
    struct virtio_pmem_resp resp;   /* response written back to the guest */
} VirtIODeviceRequest;
| 37 | |
| 38 | static int worker_cb(void *opaque) |
| 39 | { |
| 40 | VirtIODeviceRequest *req_data = opaque; |
| 41 | int err = 0; |
| 42 | |
| 43 | /* flush raw backing image */ |
| 44 | err = fsync(req_data->fd); |
Pankaj Gupta | 73b1230 | 2020-11-17 12:57:05 +0100 | [diff] [blame] | 45 | trace_virtio_pmem_flush_done(err); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 46 | if (err != 0) { |
| 47 | err = 1; |
| 48 | } |
| 49 | |
Wang Liang | d2adda3 | 2021-03-16 22:41:45 -0400 | [diff] [blame] | 50 | virtio_stl_p(req_data->vdev, &req_data->resp.ret, err); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 51 | |
| 52 | return 0; |
| 53 | } |
| 54 | |
| 55 | static void done_cb(void *opaque, int ret) |
| 56 | { |
| 57 | VirtIODeviceRequest *req_data = opaque; |
| 58 | int len = iov_from_buf(req_data->elem.in_sg, req_data->elem.in_num, 0, |
| 59 | &req_data->resp, sizeof(struct virtio_pmem_resp)); |
| 60 | |
| 61 | /* Callbacks are serialized, so no need to use atomic ops. */ |
| 62 | virtqueue_push(req_data->pmem->rq_vq, &req_data->elem, len); |
| 63 | virtio_notify((VirtIODevice *)req_data->pmem, req_data->pmem->rq_vq); |
Pankaj Gupta | 73b1230 | 2020-11-17 12:57:05 +0100 | [diff] [blame] | 64 | trace_virtio_pmem_response(); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 65 | g_free(req_data); |
| 66 | } |
| 67 | |
/*
 * Request-queue handler: pop one flush request, validate it, and hand the
 * blocking fsync off to the thread pool; done_cb() completes the request.
 */
static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIODeviceRequest *req_data;
    VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
    HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev);

    trace_virtio_pmem_flush_request();
    /* Pop the next element, sized so it embeds our per-request state. */
    req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
    if (!req_data) {
        virtio_error(vdev, "virtio-pmem missing request data");
        return;
    }

    /* A flush needs at least one request (out) and one response (in)
     * descriptor. */
    if (req_data->elem.out_num < 1 || req_data->elem.in_num < 1) {
        virtio_error(vdev, "virtio-pmem request not proper");
        /* Detach the element from the queue before freeing it. */
        virtqueue_detach_element(vq, (VirtQueueElement *)req_data, 0);
        g_free(req_data);
        return;
    }
    req_data->fd = memory_region_get_fd(&backend->mr);
    req_data->pmem = pmem;
    req_data->vdev = vdev;
    /* fsync() may block; run it in the thread pool, complete in done_cb(). */
    thread_pool_submit_aio(worker_cb, req_data, done_cb, req_data);
}
| 92 | |
| 93 | static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config) |
| 94 | { |
| 95 | VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); |
| 96 | struct virtio_pmem_config *pmemcfg = (struct virtio_pmem_config *) config; |
| 97 | |
| 98 | virtio_stq_p(vdev, &pmemcfg->start, pmem->start); |
| 99 | virtio_stq_p(vdev, &pmemcfg->size, memory_region_size(&pmem->memdev->mr)); |
| 100 | } |
| 101 | |
/*
 * virtio-pmem defines no device-specific feature bits; return the
 * transport-provided feature set unmodified.
 */
static uint64_t virtio_pmem_get_features(VirtIODevice *vdev, uint64_t features,
                                         Error **errp)
{
    return features;
}
| 107 | |
/*
 * Realize the device: validate the memdev link, claim the backend, then
 * initialize the virtio device and its single request queue.
 */
static void virtio_pmem_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOPMEM *pmem = VIRTIO_PMEM(dev);

    if (!pmem->memdev) {
        error_setg(errp, "virtio-pmem memdev not set");
        return;
    }

    /* Refuse a backend that is already claimed by another device. */
    if (host_memory_backend_is_mapped(pmem->memdev)) {
        error_setg(errp, "can't use already busy memdev: %s",
                   object_get_canonical_path_component(OBJECT(pmem->memdev)));
        return;
    }

    /* Claim the backend; released again in virtio_pmem_unrealize(). */
    host_memory_backend_set_mapped(pmem->memdev, true);
    virtio_init(vdev, VIRTIO_ID_PMEM, sizeof(struct virtio_pmem_config));
    /* One request queue, 128 entries, handled by virtio_pmem_flush(). */
    pmem->rq_vq = virtio_add_queue(vdev, 128, virtio_pmem_flush);
}
| 128 | |
Markus Armbruster | b69c3c2 | 2020-05-05 17:29:24 +0200 | [diff] [blame] | 129 | static void virtio_pmem_unrealize(DeviceState *dev) |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 130 | { |
| 131 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); |
| 132 | VirtIOPMEM *pmem = VIRTIO_PMEM(dev); |
| 133 | |
| 134 | host_memory_backend_set_mapped(pmem->memdev, false); |
Pan Nengyuan | 9861546 | 2020-02-25 15:55:53 +0800 | [diff] [blame] | 135 | virtio_delete_queue(pmem->rq_vq); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 136 | virtio_cleanup(vdev); |
| 137 | } |
| 138 | |
| 139 | static void virtio_pmem_fill_device_info(const VirtIOPMEM *pmem, |
| 140 | VirtioPMEMDeviceInfo *vi) |
| 141 | { |
| 142 | vi->memaddr = pmem->start; |
Pankaj Gupta | 7b8a847 | 2019-07-12 13:05:53 +0530 | [diff] [blame] | 143 | vi->size = memory_region_size(&pmem->memdev->mr); |
| 144 | vi->memdev = object_get_canonical_path(OBJECT(pmem->memdev)); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 145 | } |
| 146 | |
| 147 | static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem, |
| 148 | Error **errp) |
| 149 | { |
| 150 | if (!pmem->memdev) { |
| 151 | error_setg(errp, "'%s' property must be set", VIRTIO_PMEM_MEMDEV_PROP); |
| 152 | return NULL; |
| 153 | } |
| 154 | |
| 155 | return &pmem->memdev->mr; |
| 156 | } |
| 157 | |
/* QOM properties: guest-physical start address and the backing memdev link. */
static Property virtio_pmem_properties[] = {
    DEFINE_PROP_UINT64(VIRTIO_PMEM_ADDR_PROP, VirtIOPMEM, start, 0),
    DEFINE_PROP_LINK(VIRTIO_PMEM_MEMDEV_PROP, VirtIOPMEM, memdev,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_END_OF_LIST(),
};
| 164 | |
| 165 | static void virtio_pmem_class_init(ObjectClass *klass, void *data) |
| 166 | { |
| 167 | DeviceClass *dc = DEVICE_CLASS(klass); |
| 168 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); |
| 169 | VirtIOPMEMClass *vpc = VIRTIO_PMEM_CLASS(klass); |
| 170 | |
Marc-André Lureau | 4f67d30 | 2020-01-10 19:30:32 +0400 | [diff] [blame] | 171 | device_class_set_props(dc, virtio_pmem_properties); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 172 | |
| 173 | vdc->realize = virtio_pmem_realize; |
| 174 | vdc->unrealize = virtio_pmem_unrealize; |
| 175 | vdc->get_config = virtio_pmem_get_config; |
| 176 | vdc->get_features = virtio_pmem_get_features; |
| 177 | |
| 178 | vpc->fill_device_info = virtio_pmem_fill_device_info; |
| 179 | vpc->get_memory_region = virtio_pmem_get_memory_region; |
Gan Qixin | d3649bf | 2020-11-30 16:36:20 +0800 | [diff] [blame] | 180 | set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 181 | } |
| 182 | |
Bernhard Beschow | 5e78c98 | 2022-01-17 15:58:04 +0100 | [diff] [blame] | 183 | static const TypeInfo virtio_pmem_info = { |
Pankaj Gupta | 5f503cd | 2019-06-19 15:19:01 +0530 | [diff] [blame] | 184 | .name = TYPE_VIRTIO_PMEM, |
| 185 | .parent = TYPE_VIRTIO_DEVICE, |
| 186 | .class_size = sizeof(VirtIOPMEMClass), |
| 187 | .class_init = virtio_pmem_class_init, |
| 188 | .instance_size = sizeof(VirtIOPMEM), |
| 189 | }; |
| 190 | |
/* Register the virtio-pmem type with the QOM type system at startup. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_pmem_info);
}

type_init(virtio_register_types)