/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)
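
/*
 * For illustration only: both timeouts are module parameters expressed in
 * seconds and converted to jiffies here, e.g. with nvme_io_timeout == 30
 * and HZ == 1000, NVME_IO_TIMEOUT evaluates to 30000 jiffies.
 */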

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
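
/*
 * Worked example: NVME_CTRL_PAGE_SIZE is (1 << 12) == 4096 bytes, so the
 * controller page is always 4KiB here even when the kernel's PAGE_SIZE is
 * larger (e.g. 64KiB on some arm64 configurations).
 */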

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA = 0,
	NVME_NS_LIGHTNVM = 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE = (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS = (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST = (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM = (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation.
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),

	/*
	 * Ignore the device-provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),

	/*
	 * Use only one interrupt vector for all queues.
	 */
	NVME_QUIRK_SINGLE_VECTOR = (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES = (1 << 12),

	/*
	 * Prevent tag overlap between queues.
	 */
	NVME_QUIRK_SHARED_TAGS = (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature.
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),

	/*
	 * The controller requires the command_id value to be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID = (1 << 18),

	/*
	 * Limit the I/O queue depth to 32.
	 */
	NVME_QUIRK_LIMIT_IOQD32 = (1 << 31),
};

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command *cmd;
	union nvme_result result;
	u8 genctr;
	u8 retries;
	u8 flags;
	u16 status;
	struct nvme_ctrl *ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH	REQ_DRV

enum {
	NVME_REQ_CANCELLED = (1 << 0),
	NVME_REQ_USERCMD = (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
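
/*
 * Illustration of the layout rule above, with a hypothetical transport
 * type (not part of this header): nvme_req() simply casts the blk-mq PDU,
 * so struct nvme_request must be the first member of the per-request data:
 *
 *	struct my_transport_request {
 *		struct nvme_request req;	// must be the first member
 *		void *transport_state;
 *	};
 *
 * That way blk_mq_rq_to_pdu(rq) and nvme_req(rq) refer to the same memory.
 */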

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT	2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	u16 vendor_id;
	u16 awupf;	/* 0-based awupf value. */
	struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
	u8 csi;
};

/*
 * Anchor structure for namespaces. There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it. For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	unsigned ns_id;
	struct nvme_ns_ids ids;
	struct list_head entry;
	struct kref ref;
	bool shared;
	int instance;
	struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk *disk;
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
	struct mutex lock;
	unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu *current_path[];
#endif
};

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0,		/* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1,	/* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
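
/*
 * Worked example: with 8 bytes of metadata per logical block (ns->ms == 8
 * == sizeof(struct t10_pi_tuple)) and a nonzero ns->pi_type, the metadata
 * is exactly one T10 protection information tuple, so the controller can
 * generate/strip it and nvme_ns_has_pi() returns true.
 */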

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

/*
 * The NVMe command_id is constructed as follows:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
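
/*
 * Worked example: genctr == 0x5 and rq->tag == 0x2a3 yield command_id
 * 0x52a3; nvme_genctr_from_cid(0x52a3) recovers 0x5 and
 * nvme_tag_from_cid(0x52a3) recovers 0x2a3. The 4-bit generation counter
 * lets a completion carrying a stale or corrupted command_id be detected.
 */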

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}
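
/*
 * Illustrative failure case for the check above: if a completion arrives
 * carrying command_id 0x42a3 after tag 0x2a3 was recycled and its genctr
 * advanced to 0x5, the genctr mismatch makes nvme_find_rq() return NULL
 * instead of completing the wrong request. Callers that cannot rely on
 * the generation counter (see NVME_QUIRK_SKIP_CID_GEN above) use
 * nvme_cid_to_rq() below, which skips the check.
 */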

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!ctrl->subsystem)
		return -ENOTTY;
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}
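
/*
 * The magic value written to NVME_REG_NSSR above is the ASCII string
 * "NVMe" (0x4E 0x56 0x4D 0x65); the NVMe specification requires exactly
 * this value to trigger an NVM subsystem reset, and other writes to the
 * register are ignored.
 */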

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
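
/*
 * Worked example: for a namespace formatted with 4KiB logical blocks
 * (ns->lba_shift == 12, SECTOR_SHIFT == 9), sector 16 maps to LBA
 * 16 >> 3 == 2, and LBA 2 maps back to sector 2 << 3 == 16.
 */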

/*
 * Convert a byte length to NVMe's 0-based number of dwords.
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
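
/*
 * Worked example: len == 4096 bytes is 1024 dwords, encoded 0-based as
 * (4096 >> 2) - 1 == 1023, matching the 0-based dword-count convention
 * used by commands such as Get Log Page (NUMD).
 */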

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
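
/*
 * Illustration of the masking above: in the status value kept in
 * nvme_req(req)->status the phase bit has already been shifted out (see
 * nvme_try_complete_req() below), so bits 10:8 hold the Status Code Type.
 * (status & 0x700) == 0x300 therefore selects SCT 3h, "Path Related
 * Status", while status & 0x7ff keeps both the SCT and the 8-bit status
 * code for the ANA comparison.
 */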

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so. If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
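
/*
 * Illustration: AEN commands are submitted on the admin queue (qid 0)
 * using tags at or above NVME_AQ_BLK_MQ_DEPTH, which blk-mq never hands
 * out for regular admin requests, so a completion matching both
 * conditions can only belong to an Asynchronous Event Notification.
 */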

void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx);
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

static inline void nvme_trace_bio_complete(struct request *req,
		blk_status_t status)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as separate devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline void nvme_trace_bio_complete(struct request *req,
		blk_status_t status)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
int nvme_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);

blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
#define nvme_report_zones NULL

static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

#endif /* _NVME_H */