blob: a4c1555cc0580dd9979bfebfb9e31eb705e6d424 [file] [log] [blame]
Julian Stecklinac83e15b2017-02-13 10:03:59 +01001// NVMe datastructures and constants
2//
3// Copyright 2017 Amazon.com, Inc. or its affiliates.
4//
5// This file may be distributed under the terms of the GNU LGPLv3 license.
6
7#ifndef __NVME_INT_H
8#define __NVME_INT_H
9
10#include "types.h" // u32
11#include "pcidevice.h" // struct pci_device
12
/* Maximum number of entries in a PRP list.  With 4KB pages, prp1 plus
   15 list entries covers 16 pages, allowing requests up to 64KB. */
#define NVME_MAX_PRPL_ENTRIES 15 /* Allows requests up to 64kb */
14
Julian Stecklinac83e15b2017-02-13 10:03:59 +010015/* Data structures */
16
/* The memory-mapped register file of an NVMe host controller.  This struct
   follows the register naming scheme in the NVMe specification.  The layout
   mirrors hardware, so fields must not be reordered or resized; accesses go
   through a volatile pointer (see struct nvme_ctrl). */
struct nvme_reg {
    u64 cap;    /* CAP: controller capabilities */
    u32 vs;     /* VS: version */
    u32 intms;  /* INTMS: interrupt mask set */
    u32 intmc;  /* INTMC: interrupt mask clear */
    u32 cc;     /* CC: controller configuration */
    u32 _res0;  /* reserved */
    u32 csts;   /* CSTS: controller status */
    u32 _res1;  /* reserved */
    u32 aqa;    /* AQA: admin queue attributes */
    u64 asq;    /* ASQ: admin submission queue base address */
    u64 acq;    /* ACQ: admin completion queue base address */
};
32
/* Submission queue entry (64 bytes, matching NVME_SQE_SIZE_LOG).  May be
   filled either as raw dwords or through the named command fields. */
struct nvme_sqe {
    union {
        u32 dword[16];
        struct {
            u32 cdw0;       /* Command DWORD 0 */
            u32 nsid;       /* Namespace ID */
            u64 _res0;      /* reserved */
            u64 mptr;       /* metadata ptr */

            u64 dptr_prp1;  /* data pointer: PRP entry 1 */
            u64 dptr_prp2;  /* data pointer: PRP entry 2 (or PRP list) */
        };
    };
};
48
/* Completion queue entry (16 bytes, matching NVME_CQE_SIZE_LOG). */
struct nvme_cqe {
    union {
        u32 dword[4];
        struct {
            u32 cdw0;       /* Command DWORD 0 */
            u32 _res0;      /* reserved */
            u16 sq_head;    /* submission queue head pointer at completion */
            u16 sq_id;      /* submission queue the command came from */
            u16 cid;        /* command identifier */
            u16 status;     /* status; bit 0 is the phase tag, which is bit 16
                               of dword[3] (see NVME_CQE_DW3_P) */
        };
    };
};
63
/* The common part of every submission or completion queue. */
struct nvme_queue {
    u32 *dbl;   /* pointer to this queue's doorbell register */
    u16 mask;   /* length - 1; used to wrap queue indices */
};
69
/* Completion queue state. */
struct nvme_cq {
    struct nvme_queue common;
    struct nvme_cqe *cqe;       /* array of completion queue entries */

    /* We have read up to (but not including) this entry in the queue. */
    u16 head;

    /* The current phase bit the controller uses to indicate that it has
       written a new entry.  This is inverted after each wrap. */
    unsigned phase : 1;
};
81
/* Submission queue state. */
struct nvme_sq {
    struct nvme_queue common;
    struct nvme_sqe *sqe;       /* array of submission queue entries */

    /* Corresponding completion queue.  We only support a single SQ per CQ. */
    struct nvme_cq *cq;

    /* The last entry the controller has fetched. */
    u16 head;

    /* The last value we have written to the tail doorbell. */
    u16 tail;
};
95
/* Per-controller driver state. */
struct nvme_ctrl {
    struct pci_device *pci;         /* underlying PCI device */
    struct nvme_reg volatile *reg;  /* memory-mapped register file */

    u32 doorbell_stride;            /* in bytes */

    struct nvme_sq admin_sq;        /* admin submission queue */
    struct nvme_cq admin_cq;        /* admin completion queue */

    u32 ns_count;                   /* number of namespaces (identify nn) */

    struct nvme_sq io_sq;           /* I/O submission queue */
    struct nvme_cq io_cq;           /* I/O completion queue */
};
110
/* Per-namespace state; one of these backs each exposed drive. */
struct nvme_namespace {
    struct drive_s drive;       /* embedded generic drive object (declared elsewhere) */
    struct nvme_ctrl *ctrl;     /* controller this namespace belongs to */

    u32 ns_id;                  /* namespace ID used in commands (nsid) */

    u64 lba_count;              /* The total amount of sectors. */

    u32 block_size;             /* logical block size in bytes */
    u32 metadata_size;          /* per-block metadata size (from the LBA format) */
    u32 max_req_size;           /* NOTE(review): presumably the per-request size
                                   limit derived from identify mdts — confirm
                                   units against the code that sets it */

    /* Page aligned buffer of size NVME_PAGE_SIZE. */
    char *dma_buffer;

    /* Page List state for requests spanning multiple pages. */
    u32 prpl_len;               /* number of entries currently in prpl[] */
    void *prp1;                 /* first data page of the current request */
    u64 prpl[NVME_MAX_PRPL_ENTRIES];    /* PRP list entries */
};
131
132/* Data structures for NVMe admin identify commands */
133
/* Data returned by the IDENTIFY CONTROLLER admin command
   (NVME_ADMIN_IDENTIFY_CNS_ID_CTRL).  Only fields this driver uses are
   named; everything else is padding. */
struct nvme_identify_ctrl {
    u16 vid;        /* PCI vendor ID */
    u16 ssvid;      /* PCI subsystem vendor ID */
    char sn[20];    /* serial number (ASCII) */
    char mn[40];    /* model number (ASCII) */
    char fr[8];     /* firmware revision (ASCII) */

    u8 rab;         /* recommended arbitration burst */
    u8 ieee[3];     /* IEEE OUI identifier */
    u8 cmic;        /* controller multi-path I/O capabilities */
    u8 mdts;        /* maximum data transfer size (encoded per NVMe spec) */

    /* Named fields above occupy 78 bytes; pad so nn lands at offset 516. */
    char _boring[516 - 78];

    u32 nn; /* number of namespaces */
};
150
/* Data returned by IDENTIFY with NVME_ADMIN_IDENTIFY_CNS_GET_NS_LIST:
   a list of active namespace IDs. */
struct nvme_identify_ns_list {
    u32 ns_id[1024];
};
154
/* One entry of the LBA format table in the IDENTIFY NAMESPACE data. */
struct nvme_lba_format {
    u16 ms;     /* metadata size in bytes */
    u8 lbads;   /* LBA data size (log2 of the block size, per NVMe spec) */
    u8 rp;      /* relative performance */
    u8 res;     /* reserved */
};
161
/* Data returned by the IDENTIFY NAMESPACE admin command
   (NVME_ADMIN_IDENTIFY_CNS_ID_NS). */
struct nvme_identify_ns {
    u64 nsze;   /* namespace size in logical blocks */
    u64 ncap;   /* namespace capacity */
    u64 nuse;   /* namespace utilization */
    u8 nsfeat;  /* namespace features */
    u8 nlbaf;   /* number of supported LBA formats */
    u8 flbas;   /* formatted LBA size (selects an entry in lbaf[]) */

    /* Named fields above occupy 27 bytes; pad so lbaf lands at offset 128. */
    char _boring[128 - 27];

    struct nvme_lba_format lbaf[16];    /* LBA format table */
};
174
/* Buffer for IDENTIFY command results; which member is valid depends on
   the CNS value submitted with the command. */
union nvme_identify {
    struct nvme_identify_ns ns;
    struct nvme_identify_ctrl ctrl;
    struct nvme_identify_ns_list ns_list;
};
180
/* NVMe constants */

#define NVME_CAP_CSS_NVME (1ULL << 37)  /* CAP: NVMe command set supported */

#define NVME_CSTS_FATAL (1U << 1)       /* CSTS: controller fatal status */
#define NVME_CSTS_RDY   (1U << 0)       /* CSTS: controller ready */

#define NVME_CC_EN (1U << 0)            /* CC: enable controller */

/* Admin command opcodes */
#define NVME_SQE_OPC_ADMIN_CREATE_IO_SQ 1U
#define NVME_SQE_OPC_ADMIN_CREATE_IO_CQ 5U
#define NVME_SQE_OPC_ADMIN_IDENTIFY     6U

/* I/O command opcodes */
#define NVME_SQE_OPC_IO_WRITE 1U
#define NVME_SQE_OPC_IO_READ  2U

/* CNS values for the IDENTIFY admin command */
#define NVME_ADMIN_IDENTIFY_CNS_ID_NS       0U
#define NVME_ADMIN_IDENTIFY_CNS_ID_CTRL     1U
#define NVME_ADMIN_IDENTIFY_CNS_GET_NS_LIST 2U

/* Phase tag bit in completion queue entry dword[3] */
#define NVME_CQE_DW3_P (1U << 16)

#define NVME_PAGE_SIZE 4096
/* Fully parenthesized so the macro expands safely in any expression. */
#define NVME_PAGE_MASK (~(NVME_PAGE_SIZE - 1))

/* Length for the queue entries (log2: 64-byte SQEs, 16-byte CQEs). */
#define NVME_SQE_SIZE_LOG 6
#define NVME_CQE_SIZE_LOG 4
209
210#endif
211
212/* EOF */