1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef NVM_H
3*4882a593Smuzhiyun #define NVM_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/blkdev.h>
6*4882a593Smuzhiyun #include <linux/types.h>
7*4882a593Smuzhiyun #include <uapi/linux/lightnvm.h>
8*4882a593Smuzhiyun
/* Target I/O completion codes and I/O origin hints. */
enum {
	/* internal request completion codes */
	NVM_IO_OK = 0,		/* request completed successfully */
	NVM_IO_REQUEUE = 1,	/* request must be resubmitted */
	NVM_IO_DONE = 2,	/* request fully handled; no further processing */
	NVM_IO_ERR = 3,		/* request failed */

	/* I/O origin hints */
	NVM_IOTYPE_NONE = 0,	/* regular user I/O */
	NVM_IOTYPE_GC = 1,	/* I/O issued on behalf of garbage collection */
};
18*4882a593Smuzhiyun
/*
 * Bit widths of the fields in the 64-bit physical address (struct ppa_addr).
 * The three layouts below all pack into the same u64.
 */

/* common format */
#define NVM_GEN_CH_BITS (8)
#define NVM_GEN_LUN_BITS (8)
#define NVM_GEN_BLK_BITS (16)
#define NVM_GEN_RESERVED (32)

/* 1.2 format */
#define NVM_12_PG_BITS (16)
#define NVM_12_PL_BITS (4)
#define NVM_12_SEC_BITS (4)
#define NVM_12_RESERVED (8)

/* 2.0 format */
#define NVM_20_SEC_BITS (24)
#define NVM_20_RESERVED (8)
34*4882a593Smuzhiyun
/* Open-Channel SSD spec revision, stored in nvm_geo->version */
enum {
	NVM_OCSSD_SPEC_12 = 12,
	NVM_OCSSD_SPEC_20 = 20,
};
39*4882a593Smuzhiyun
/*
 * Physical page address (PPA): a single 64-bit word viewable either as a
 * raw value (.ppa) or through one of several bitfield layouts that share
 * the same storage. Which layout is valid depends on the device spec
 * version and on whether the address refers to cached data.
 */
struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		/* generic device format (fields common to 1.2 and 2.0) */
		struct {
			u64 ch : NVM_GEN_CH_BITS;	/* channel */
			u64 lun : NVM_GEN_LUN_BITS;	/* LUN within channel */
			u64 blk : NVM_GEN_BLK_BITS;	/* block/chunk */
			u64 reserved : NVM_GEN_RESERVED;
		} a;

		/* 1.2 device format */
		struct {
			u64 ch : NVM_GEN_CH_BITS;
			u64 lun : NVM_GEN_LUN_BITS;
			u64 blk : NVM_GEN_BLK_BITS;
			u64 pg : NVM_12_PG_BITS;	/* page within block */
			u64 pl : NVM_12_PL_BITS;	/* plane */
			u64 sec : NVM_12_SEC_BITS;	/* sector within page */
			u64 reserved : NVM_12_RESERVED;
		} g;

		/* 2.0 device format */
		struct {
			u64 grp : NVM_GEN_CH_BITS;	/* group */
			u64 pu : NVM_GEN_LUN_BITS;	/* parallel unit */
			u64 chk : NVM_GEN_BLK_BITS;	/* chunk */
			u64 sec : NVM_20_SEC_BITS;	/* sector within chunk */
			u64 reserved : NVM_20_RESERVED;
		} m;

		/* cache-line address (data still resident in host cache) */
		struct {
			u64 line : 63;
			u64 is_cached : 1;
		} c;

		u64 ppa;	/* raw 64-bit view */
	};
};
79*4882a593Smuzhiyun
/* Forward declarations for the callback signatures below */
struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
struct nvm_chk_meta;

/* Callback signatures implemented by the underlying device driver */
typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
							struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
								dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
97*4882a593Smuzhiyun
/*
 * Operations provided by the backing driver (e.g. the NVMe driver) for a
 * LightNVM-managed device. All pointers may be required by the core;
 * see the callback typedefs above for the exact signatures.
 */
struct nvm_dev_ops {
	nvm_id_fn *identity;		/* read device identity/geometry */
	nvm_op_bb_tbl_fn *get_bb_tbl;	/* read bad-block table (1.2) */
	nvm_op_set_bb_fn *set_bb_tbl;	/* update bad-block table (1.2) */

	nvm_get_chk_meta_fn *get_chk_meta;	/* read chunk metadata (2.0) */

	nvm_submit_io_fn *submit_io;	/* submit a vectored I/O request */

	/* DMA-coherent pool management for PPA/metadata lists */
	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;
};
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun #ifdef CONFIG_NVM
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun #include <linux/blkdev.h>
116*4882a593Smuzhiyun #include <linux/file.h>
117*4882a593Smuzhiyun #include <linux/dmapool.h>
118*4882a593Smuzhiyun #include <uapi/linux/lightnvm.h>
119*4882a593Smuzhiyun
/* Identity, status and command constants (primarily 1.2-spec). */
enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 1,
	NVM_PLANE_DOUBLE = 2,
	NVM_PLANE_QUAD = 4,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
	NVM_RSP_ERR_FAILECC = 0x4281,
	NVM_RSP_ERR_FAILCRC = 0x4004,
	NVM_RSP_WARN_HIGHECC = 0x4700,

	/* Device opcodes */
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_ENABLE = 0x200,

	/* Block Types */
	NVM_BLK_T_FREE = 0x0,
	NVM_BLK_T_BAD = 0x1,
	NVM_BLK_T_GRWN_BAD = 0x2,
	NVM_BLK_T_DEV = 0x4,
	NVM_BLK_T_HOST = 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC = 0x1,
	NVM_ID_CAP_CMD_SUSPEND = 0x2,
	NVM_ID_CAP_SCRAMBLE = 0x4,
	NVM_ID_CAP_ENCRYPT = 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC = 0,
	NVM_ID_FMTYPE_MLC = 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT = 0x1,
	/* NOTE(review): "UD" looks like a typo for "ID", but the name is
	 * part of the public header and is kept for compatibility. */
	NVM_UD_DCAP_ECC = 0x2,
};
179*4882a593Smuzhiyun
/* MLC pair information from the 1.2 identity log page */
struct nvm_id_lp_mlc {
	u16 num_pairs;	/* number of valid entries in pairs[] */
	u8 pairs[886];
};

/* 1.2 identity log-page table: id header followed by MLC pair data */
struct nvm_id_lp_tbl {
	__u8 id[8];
	struct nvm_id_lp_mlc mlc;
};
189*4882a593Smuzhiyun
/*
 * 1.2 device address format: per-field bit lengths, shift offsets and
 * pre-computed masks used to pack/unpack a struct ppa_addr.
 */
struct nvm_addrf_12 {
	/* field widths in bits */
	u8 ch_len;
	u8 lun_len;
	u8 blk_len;
	u8 pg_len;
	u8 pln_len;
	u8 sec_len;

	/* bit offset of each field within the 64-bit address */
	u8 ch_offset;
	u8 lun_offset;
	u8 blk_offset;
	u8 pg_offset;
	u8 pln_offset;
	u8 sec_offset;

	/* mask covering each field at its offset */
	u64 ch_mask;
	u64 lun_mask;
	u64 blk_mask;
	u64 pg_mask;
	u64 pln_mask;
	u64 sec_mask;
};
212*4882a593Smuzhiyun
/*
 * 2.0 (generic) device address format: lengths, offsets and masks for
 * the group/PU/chunk/sector fields. Layout-compatible as the generic
 * counterpart of struct nvm_addrf_12.
 */
struct nvm_addrf {
	/* field widths in bits */
	u8 ch_len;
	u8 lun_len;
	u8 chk_len;
	u8 sec_len;
	u8 rsv_len[2];

	/* bit offset of each field within the 64-bit address */
	u8 ch_offset;
	u8 lun_offset;
	u8 chk_offset;
	u8 sec_offset;
	u8 rsv_off[2];

	/* mask covering each field at its offset */
	u64 ch_mask;
	u64 lun_mask;
	u64 chk_mask;
	u64 sec_mask;
	u64 rsv_mask[2];
};
232*4882a593Smuzhiyun
/* Chunk state and type flags, as reported in chunk metadata (2.0). */
enum {
	/* Chunk states */
	NVM_CHK_ST_FREE =	1 << 0,
	NVM_CHK_ST_CLOSED =	1 << 1,
	NVM_CHK_ST_OPEN =	1 << 2,
	NVM_CHK_ST_OFFLINE =	1 << 3,

	/* Chunk types */
	NVM_CHK_TP_W_SEQ =	1 << 0,	/* must be written sequentially */
	NVM_CHK_TP_W_RAN =	1 << 1,	/* allows random writes */
	NVM_CHK_TP_SZ_SPEC =	1 << 4,	/* chunk deviates from spec size */
};
245*4882a593Smuzhiyun
/*
 * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
 * buffer can be used when converting from little endian to cpu addressing.
 */
struct nvm_chk_meta {
	u8 state;	/* NVM_CHK_ST_* */
	u8 type;	/* NVM_CHK_TP_* */
	u8 wi;		/* wear-level index */
	u8 rsvd[5];
	u64 slba;	/* starting LBA of the chunk */
	u64 cnlb;	/* number of logical blocks in the chunk */
	u64 wp;		/* write pointer */
};
259*4882a593Smuzhiyun
/* A target instance (e.g. pblk) instantiated on top of a device */
struct nvm_target {
	struct list_head list;		/* entry in nvm_dev->targets */
	struct nvm_tgt_dev *dev;	/* sub-device the target owns */
	struct nvm_tgt_type *type;	/* registered target type */
	struct gendisk *disk;		/* exposed block device */
};
266*4882a593Smuzhiyun
/* Sentinel for an unmapped/invalid 64-bit address */
#define ADDR_EMPTY (~0ULL)

/*
 * Target over-provisioning, in percent. 101 is out of range on purpose;
 * presumably it marks "no OP specified" so a default can be applied —
 * TODO(review): confirm against the target creation path.
 */
#define NVM_TARGET_DEFAULT_OP (101)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)

/* LightNVM subsystem version reported to user space */
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
278*4882a593Smuzhiyun
struct nvm_rq;
/* Completion callback invoked when a request finishes */
typedef void (nvm_end_io_fn)(struct nvm_rq *);

/*
 * A vectored LightNVM I/O request. For a single-sector request the
 * address is embedded (ppa_addr); for multi-sector requests an external
 * DMA-mapped list is used (see nvm_rq_to_ppa_list()).
 */
struct nvm_rq {
	struct nvm_tgt_dev *dev;

	struct bio *bio;	/* originating bio, if any */

	union {
		struct ppa_addr ppa_addr;	/* inline address (nr_ppas == 1) */
		dma_addr_t dma_ppa_list;	/* DMA handle of ppa_list */
	};

	struct ppa_addr *ppa_list;	/* CPU view of the address list */

	void *meta_list;		/* out-of-band metadata buffer */
	dma_addr_t dma_meta_list;	/* DMA handle of meta_list */

	nvm_end_io_fn *end_io;		/* completion callback */

	uint8_t opcode;			/* NVM_OP_* */
	uint16_t nr_ppas;		/* number of addresses in the request */
	uint16_t flags;			/* NVM_IO_* command flags */

	u64 ppa_status; /* ppa media status */
	int error;	/* completion status */

	int is_seq; /* Sequential hint flag. 1.2 only */

	void *private;	/* target-private context */
};
310*4882a593Smuzhiyun
nvm_rq_from_pdu(void * pdu)311*4882a593Smuzhiyun static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun return pdu - sizeof(struct nvm_rq);
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun
nvm_rq_to_pdu(struct nvm_rq * rqdata)316*4882a593Smuzhiyun static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun return rqdata + 1;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
nvm_rq_to_ppa_list(struct nvm_rq * rqd)321*4882a593Smuzhiyun static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun
/* Host-side block states (bitmask) */
enum {
	NVM_BLK_ST_FREE =	0x1,	/* Free block */
	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
};
331*4882a593Smuzhiyun
/* Instance geometry: the device layout as seen by one nvm_dev/tgt_dev */
struct nvm_geo {
	/* device reported version */
	u8 major_ver_id;
	u8 minor_ver_id;

	/* kernel short version (NVM_OCSSD_SPEC_12 or NVM_OCSSD_SPEC_20) */
	u8 version;

	/* instance specific geometry */
	int num_ch;
	int num_lun;		/* per channel */

	/* calculated values */
	int all_luns;		/* across channels */
	int all_chunks;		/* across channels */

	int op;			/* over-provision in instance */

	sector_t total_secs;	/* across channels */

	/* chunk geometry */
	u32 num_chk;		/* chunks per lun */
	u32 clba;		/* sectors per chunk */
	u16 csecs;		/* sector size */
	u16 sos;		/* out-of-band area size */
	bool ext;		/* metadata in extended data buffer */
	u32 mdts;		/* max data transfer size */

	/* device write constraints */
	u32 ws_min;		/* minimum write size */
	u32 ws_opt;		/* optimal write size */
	u32 mw_cunits;		/* distance required for successful read */
	u32 maxoc;		/* maximum open chunks */
	u32 maxocpu;		/* maximum open chunks per parallel unit */

	/* device capabilities */
	u32 mccap;

	/* device timings */
	u32 trdt;		/* Avg. Tread (ns) */
	u32 trdm;		/* Max Tread (ns) */
	u32 tprt;		/* Avg. Tprog (ns) */
	u32 tprm;		/* Max Tprog (ns) */
	u32 tbet;		/* Avg. Terase (ns) */
	u32 tbem;		/* Max Terase (ns) */

	/* generic address format */
	struct nvm_addrf addrf;

	/* 1.2 compatibility */
	u8 vmnt;
	u32 cap;
	u32 dom;

	u8 mtype;
	u8 fmtype;

	u16 cpar;
	u32 mpos;

	u8 num_pln;		/* planes per LUN */
	u8 pln_mode;		/* NVM_PLANE_* */
	u16 num_pg;		/* pages per block */
	u16 fpg_sz;		/* flash page size */
};
398*4882a593Smuzhiyun
/* sub-device structure: the slice of a device handed to one target */
struct nvm_tgt_dev {
	/* Device information (geometry restricted to this target's LUNs) */
	struct nvm_geo geo;

	/* Base ppas for target LUNs */
	struct ppa_addr *luns;

	struct request_queue *q;	/* backing request queue */

	struct nvm_dev *parent;		/* owning device */
	void *map;			/* device->target LUN mapping */
};
412*4882a593Smuzhiyun
/* A registered LightNVM device */
struct nvm_dev {
	struct nvm_dev_ops *ops;	/* driver callbacks */

	struct list_head devices;	/* entry in the global device list */

	/* Device information */
	struct nvm_geo geo;

	unsigned long *lun_map;		/* bitmap of LUNs claimed by targets */
	void *dma_pool;			/* pool for PPA/metadata lists */

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
	void *private_data;		/* driver-private context */

	struct kref ref;		/* device lifetime refcount */
	void *rmap;			/* target->device reverse LUN map */

	struct mutex mlock;		/* serializes target management */
	spinlock_t lock;		/* protects lun_map */

	/* target management */
	struct list_head area_list;
	struct list_head targets;
};
439*4882a593Smuzhiyun
generic_to_dev_addr(struct nvm_dev * dev,struct ppa_addr r)440*4882a593Smuzhiyun static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
441*4882a593Smuzhiyun struct ppa_addr r)
442*4882a593Smuzhiyun {
443*4882a593Smuzhiyun struct nvm_geo *geo = &dev->geo;
444*4882a593Smuzhiyun struct ppa_addr l;
445*4882a593Smuzhiyun
446*4882a593Smuzhiyun if (geo->version == NVM_OCSSD_SPEC_12) {
447*4882a593Smuzhiyun struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
448*4882a593Smuzhiyun
449*4882a593Smuzhiyun l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
450*4882a593Smuzhiyun l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
451*4882a593Smuzhiyun l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
452*4882a593Smuzhiyun l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
453*4882a593Smuzhiyun l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
454*4882a593Smuzhiyun l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
455*4882a593Smuzhiyun } else {
456*4882a593Smuzhiyun struct nvm_addrf *lbaf = &geo->addrf;
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
459*4882a593Smuzhiyun l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
460*4882a593Smuzhiyun l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
461*4882a593Smuzhiyun l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun return l;
465*4882a593Smuzhiyun }
466*4882a593Smuzhiyun
dev_to_generic_addr(struct nvm_dev * dev,struct ppa_addr r)467*4882a593Smuzhiyun static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
468*4882a593Smuzhiyun struct ppa_addr r)
469*4882a593Smuzhiyun {
470*4882a593Smuzhiyun struct nvm_geo *geo = &dev->geo;
471*4882a593Smuzhiyun struct ppa_addr l;
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun l.ppa = 0;
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun if (geo->version == NVM_OCSSD_SPEC_12) {
476*4882a593Smuzhiyun struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
477*4882a593Smuzhiyun
478*4882a593Smuzhiyun l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
479*4882a593Smuzhiyun l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
480*4882a593Smuzhiyun l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
481*4882a593Smuzhiyun l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
482*4882a593Smuzhiyun l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
483*4882a593Smuzhiyun l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
484*4882a593Smuzhiyun } else {
485*4882a593Smuzhiyun struct nvm_addrf *lbaf = &geo->addrf;
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
488*4882a593Smuzhiyun l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
489*4882a593Smuzhiyun l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
490*4882a593Smuzhiyun l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
491*4882a593Smuzhiyun }
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun return l;
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun
dev_to_chunk_addr(struct nvm_dev * dev,void * addrf,struct ppa_addr p)496*4882a593Smuzhiyun static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
497*4882a593Smuzhiyun struct ppa_addr p)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun struct nvm_geo *geo = &dev->geo;
500*4882a593Smuzhiyun u64 caddr;
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun if (geo->version == NVM_OCSSD_SPEC_12) {
503*4882a593Smuzhiyun struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun caddr = (u64)p.g.pg << ppaf->pg_offset;
506*4882a593Smuzhiyun caddr |= (u64)p.g.pl << ppaf->pln_offset;
507*4882a593Smuzhiyun caddr |= (u64)p.g.sec << ppaf->sec_offset;
508*4882a593Smuzhiyun } else {
509*4882a593Smuzhiyun caddr = p.m.sec;
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun
512*4882a593Smuzhiyun return caddr;
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun
/*
 * Expand a compact 32-bit address into a 64-bit ppa_addr.
 * All-ones maps to the ADDR_EMPTY sentinel; a set top bit marks a
 * cache-line address; otherwise the fields are unpacked per the address
 * format in *addrf (nvm_addrf_12 for 1.2, nvm_addrf for 2.0).
 */
static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
						 void *addrf, u32 ppa32)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == ~0U) {
		ppa64.ppa = ADDR_EMPTY;
		return ppa64;
	}

	if (ppa32 & (1U << 31)) {
		/* cached entry: lower 31 bits carry the cache line */
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
		return ppa64;
	}

	if (geo->version == NVM_OCSSD_SPEC_12) {
		struct nvm_addrf_12 *pf = addrf;

		ppa64.g.ch = (ppa32 & pf->ch_mask) >> pf->ch_offset;
		ppa64.g.lun = (ppa32 & pf->lun_mask) >> pf->lun_offset;
		ppa64.g.blk = (ppa32 & pf->blk_mask) >> pf->blk_offset;
		ppa64.g.pg = (ppa32 & pf->pg_mask) >> pf->pg_offset;
		ppa64.g.pl = (ppa32 & pf->pln_mask) >> pf->pln_offset;
		ppa64.g.sec = (ppa32 & pf->sec_mask) >> pf->sec_offset;
	} else {
		struct nvm_addrf *af = addrf;

		ppa64.m.grp = (ppa32 & af->ch_mask) >> af->ch_offset;
		ppa64.m.pu = (ppa32 & af->lun_mask) >> af->lun_offset;
		ppa64.m.chk = (ppa32 & af->chk_mask) >> af->chk_offset;
		ppa64.m.sec = (ppa32 & af->sec_mask) >> af->sec_offset;
	}

	return ppa64;
}
561*4882a593Smuzhiyun
nvm_ppa64_to_ppa32(struct nvm_dev * dev,void * addrf,struct ppa_addr ppa64)562*4882a593Smuzhiyun static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
563*4882a593Smuzhiyun void *addrf, struct ppa_addr ppa64)
564*4882a593Smuzhiyun {
565*4882a593Smuzhiyun u32 ppa32 = 0;
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun if (ppa64.ppa == ADDR_EMPTY) {
568*4882a593Smuzhiyun ppa32 = ~0U;
569*4882a593Smuzhiyun } else if (ppa64.c.is_cached) {
570*4882a593Smuzhiyun ppa32 |= ppa64.c.line;
571*4882a593Smuzhiyun ppa32 |= 1U << 31;
572*4882a593Smuzhiyun } else {
573*4882a593Smuzhiyun struct nvm_geo *geo = &dev->geo;
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun if (geo->version == NVM_OCSSD_SPEC_12) {
576*4882a593Smuzhiyun struct nvm_addrf_12 *ppaf = addrf;
577*4882a593Smuzhiyun
578*4882a593Smuzhiyun ppa32 |= ppa64.g.ch << ppaf->ch_offset;
579*4882a593Smuzhiyun ppa32 |= ppa64.g.lun << ppaf->lun_offset;
580*4882a593Smuzhiyun ppa32 |= ppa64.g.blk << ppaf->blk_offset;
581*4882a593Smuzhiyun ppa32 |= ppa64.g.pg << ppaf->pg_offset;
582*4882a593Smuzhiyun ppa32 |= ppa64.g.pl << ppaf->pln_offset;
583*4882a593Smuzhiyun ppa32 |= ppa64.g.sec << ppaf->sec_offset;
584*4882a593Smuzhiyun } else {
585*4882a593Smuzhiyun struct nvm_addrf *lbaf = addrf;
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun ppa32 |= ppa64.m.grp << lbaf->ch_offset;
588*4882a593Smuzhiyun ppa32 |= ppa64.m.pu << lbaf->lun_offset;
589*4882a593Smuzhiyun ppa32 |= ppa64.m.chk << lbaf->chk_offset;
590*4882a593Smuzhiyun ppa32 |= ppa64.m.sec << lbaf->sec_offset;
591*4882a593Smuzhiyun }
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun return ppa32;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun
nvm_next_ppa_in_chk(struct nvm_tgt_dev * dev,struct ppa_addr * ppa)597*4882a593Smuzhiyun static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
598*4882a593Smuzhiyun struct ppa_addr *ppa)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun struct nvm_geo *geo = &dev->geo;
601*4882a593Smuzhiyun int last = 0;
602*4882a593Smuzhiyun
603*4882a593Smuzhiyun if (geo->version == NVM_OCSSD_SPEC_12) {
604*4882a593Smuzhiyun int sec = ppa->g.sec;
605*4882a593Smuzhiyun
606*4882a593Smuzhiyun sec++;
607*4882a593Smuzhiyun if (sec == geo->ws_min) {
608*4882a593Smuzhiyun int pg = ppa->g.pg;
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun sec = 0;
611*4882a593Smuzhiyun pg++;
612*4882a593Smuzhiyun if (pg == geo->num_pg) {
613*4882a593Smuzhiyun int pl = ppa->g.pl;
614*4882a593Smuzhiyun
615*4882a593Smuzhiyun pg = 0;
616*4882a593Smuzhiyun pl++;
617*4882a593Smuzhiyun if (pl == geo->num_pln)
618*4882a593Smuzhiyun last = 1;
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun ppa->g.pl = pl;
621*4882a593Smuzhiyun }
622*4882a593Smuzhiyun ppa->g.pg = pg;
623*4882a593Smuzhiyun }
624*4882a593Smuzhiyun ppa->g.sec = sec;
625*4882a593Smuzhiyun } else {
626*4882a593Smuzhiyun ppa->m.sec++;
627*4882a593Smuzhiyun if (ppa->m.sec == geo->clba)
628*4882a593Smuzhiyun last = 1;
629*4882a593Smuzhiyun }
630*4882a593Smuzhiyun
631*4882a593Smuzhiyun return last;
632*4882a593Smuzhiyun }
633*4882a593Smuzhiyun
/* Target entry-point signatures (implemented by e.g. pblk) */
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
				int flags);
typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);

/* Target creation flags: who maintains the logical-to-physical table */
enum {
	NVM_TGT_F_DEV_L2P = 0,		/* device-side L2P */
	NVM_TGT_F_HOST_L2P = 1 << 0,	/* host-side L2P */
};
645*4882a593Smuzhiyun
/* A registered target implementation (e.g. pblk) */
struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];	/* major, minor, patch */
	int flags;			/* NVM_TGT_F_* */

	/* target entry points */
	const struct block_device_operations *bops;
	nvm_tgt_capacity_fn *capacity;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* sysfs */
	nvm_tgt_sysfs_init_fn *sysfs_init;
	nvm_tgt_sysfs_exit_fn *sysfs_exit;

	/* For internal use */
	struct list_head list;		/* entry in the registered-type list */
	struct module *owner;
};
667*4882a593Smuzhiyun
/* Target type registration */
extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

/* DMA-coherent buffers from the device's dma pool */
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

/* Device lifetime */
extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

/* Chunk metadata and I/O submission */
extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
			      int, struct nvm_chk_meta *);
extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
			      int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
extern void nvm_end_io(struct nvm_rq *);
685*4882a593Smuzhiyun
686*4882a593Smuzhiyun #else /* CONFIG_NVM */
struct nvm_dev_ops;

/* Stubs used when LightNVM support is compiled out (!CONFIG_NVM) */
static inline struct nvm_dev *nvm_alloc_dev(int node)
{
	return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
	return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
698*4882a593Smuzhiyun #endif /* CONFIG_NVM */
#endif /* NVM_H */
700